[llvm] 151602c - [Inline] Convert some tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 8 01:05:33 PST 2022


Author: Nikita Popov
Date: 2022-12-08T10:05:23+01:00
New Revision: 151602c7a9935558ca671b35359989b261045db0

URL: https://github.com/llvm/llvm-project/commit/151602c7a9935558ca671b35359989b261045db0
DIFF: https://github.com/llvm/llvm-project/commit/151602c7a9935558ca671b35359989b261045db0.diff

LOG: [Inline] Convert some tests to opaque pointers (NFC)

Added: 
    

Modified: 
    llvm/test/Transforms/Inline/2003-09-14-InlineValue.ll
    llvm/test/Transforms/Inline/2003-09-22-PHINodeInlineFail.ll
    llvm/test/Transforms/Inline/2003-09-22-PHINodesInExceptionDest.ll
    llvm/test/Transforms/Inline/2003-09-22-PHINodesInNormalInvokeDest.ll
    llvm/test/Transforms/Inline/2003-10-13-AllocaDominanceProblem.ll
    llvm/test/Transforms/Inline/2007-04-15-InlineEH.ll
    llvm/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll
    llvm/test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll
    llvm/test/Transforms/Inline/2009-05-07-CallUsingSelfCrash.ll
    llvm/test/Transforms/Inline/AArch64/binop.ll
    llvm/test/Transforms/Inline/AArch64/gep-cost.ll
    llvm/test/Transforms/Inline/AArch64/phi.ll
    llvm/test/Transforms/Inline/AArch64/select.ll
    llvm/test/Transforms/Inline/AArch64/switch.ll
    llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument-cost.ll
    llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll
    llvm/test/Transforms/Inline/AMDGPU/inline-amdgpu-vecbonus.ll
    llvm/test/Transforms/Inline/AMDGPU/inline-hint.ll
    llvm/test/Transforms/Inline/ARM/loop-add.ll
    llvm/test/Transforms/Inline/ARM/loop-memcpy.ll
    llvm/test/Transforms/Inline/ARM/loop-noinline.ll
    llvm/test/Transforms/Inline/ML/Inputs/test-module.ll
    llvm/test/Transforms/Inline/ML/scc-dead-accounting.ll
    llvm/test/Transforms/Inline/ML/state-tracking-coro.ll
    llvm/test/Transforms/Inline/ML/state-tracking-scc-splits.ll
    llvm/test/Transforms/Inline/X86/switch.ll
    llvm/test/Transforms/Inline/align.ll
    llvm/test/Transforms/Inline/alloca-in-scc.ll
    llvm/test/Transforms/Inline/alloca_test.ll
    llvm/test/Transforms/Inline/always-inline-attr.ll
    llvm/test/Transforms/Inline/always-inline-remark.ll
    llvm/test/Transforms/Inline/always-inline.ll
    llvm/test/Transforms/Inline/arg-attr-propagation.ll
    llvm/test/Transforms/Inline/basictest.ll
    llvm/test/Transforms/Inline/bfi-update.ll
    llvm/test/Transforms/Inline/blockaddress.ll
    llvm/test/Transforms/Inline/byref-align.ll
    llvm/test/Transforms/Inline/byval-align.ll
    llvm/test/Transforms/Inline/byval-tail-call.ll
    llvm/test/Transforms/Inline/byval.ll
    llvm/test/Transforms/Inline/byval_lifetime.ll
    llvm/test/Transforms/Inline/callgraph-update.ll
    llvm/test/Transforms/Inline/cgscc-cycle.ll
    llvm/test/Transforms/Inline/cgscc-incremental-invalidate.ll
    llvm/test/Transforms/Inline/cgscc-inline-replay.ll
    llvm/test/Transforms/Inline/cgscc-invalidate.ll
    llvm/test/Transforms/Inline/cgscc-update.ll
    llvm/test/Transforms/Inline/crash-lifetime-marker.ll
    llvm/test/Transforms/Inline/crash.ll
    llvm/test/Transforms/Inline/crash2.ll
    llvm/test/Transforms/Inline/debug-invoke.ll
    llvm/test/Transforms/Inline/delete-function-with-metadata-use.ll
    llvm/test/Transforms/Inline/deleted-scc.ll
    llvm/test/Transforms/Inline/deoptimize-intrinsic.ll
    llvm/test/Transforms/Inline/devirtualize-2.ll
    llvm/test/Transforms/Inline/devirtualize-3.ll
    llvm/test/Transforms/Inline/devirtualize-4.ll
    llvm/test/Transforms/Inline/devirtualize-5.ll
    llvm/test/Transforms/Inline/devirtualize.ll
    llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll
    llvm/test/Transforms/Inline/dynamic_alloca_test.ll
    llvm/test/Transforms/Inline/ephemeral.ll
    llvm/test/Transforms/Inline/frameescape.ll
    llvm/test/Transforms/Inline/function-count-update-3.ll
    llvm/test/Transforms/Inline/gep_from_constant.ll
    llvm/test/Transforms/Inline/gvn-inline-iteration.ll
    llvm/test/Transforms/Inline/inalloca-not-static.ll
    llvm/test/Transforms/Inline/inline-assume.ll
    llvm/test/Transforms/Inline/inline-constexpr-addrspacecast-argument.ll
    llvm/test/Transforms/Inline/inline-cost-annotation-pass.ll
    llvm/test/Transforms/Inline/inline-cost-dead-users.ll
    llvm/test/Transforms/Inline/inline-fast-math-flags.ll
    llvm/test/Transforms/Inline/inline-funclets.ll
    llvm/test/Transforms/Inline/inline-hot-callsite.ll
    llvm/test/Transforms/Inline/inline-indirect-chain.ll
    llvm/test/Transforms/Inline/inline-indirect.ll
    llvm/test/Transforms/Inline/inline-invoke-tail.ll
    llvm/test/Transforms/Inline/inline-invoke-with-asm-call.ll
    llvm/test/Transforms/Inline/inline-optsize.ll
    llvm/test/Transforms/Inline/inline-ptrtoint-different-sizes.ll
    llvm/test/Transforms/Inline/inline-recur-stacksize.ll
    llvm/test/Transforms/Inline/inline-remark-mandatory.ll
    llvm/test/Transforms/Inline/inline-remark.ll
    llvm/test/Transforms/Inline/inline-retainRV-call.ll
    llvm/test/Transforms/Inline/inline-stacksize.ll
    llvm/test/Transforms/Inline/inline-threshold.ll
    llvm/test/Transforms/Inline/inline-varargs.ll
    llvm/test/Transforms/Inline/inline-vla.ll
    llvm/test/Transforms/Inline/inline_call.ll
    llvm/test/Transforms/Inline/inline_cleanup.ll
    llvm/test/Transforms/Inline/inline_constprop.ll
    llvm/test/Transforms/Inline/inline_dbg_declare.ll
    llvm/test/Transforms/Inline/inline_inv_group.ll
    llvm/test/Transforms/Inline/inline_invoke.ll
    llvm/test/Transforms/Inline/inline_minisize.ll
    llvm/test/Transforms/Inline/inline_returns_twice.ll
    llvm/test/Transforms/Inline/inline_ssp.ll
    llvm/test/Transforms/Inline/inline_unreachable.ll
    llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll
    llvm/test/Transforms/Inline/inlinedefault-threshold.ll
    llvm/test/Transforms/Inline/invariant-group-sroa.ll
    llvm/test/Transforms/Inline/invoke-cleanup.ll
    llvm/test/Transforms/Inline/invoke-combine-clauses.ll
    llvm/test/Transforms/Inline/invoke-cost.ll
    llvm/test/Transforms/Inline/invoke_test-1.ll
    llvm/test/Transforms/Inline/invoke_test-2.ll
    llvm/test/Transforms/Inline/invoke_test-3.ll
    llvm/test/Transforms/Inline/label-annotation.ll
    llvm/test/Transforms/Inline/last-callsite.ll
    llvm/test/Transforms/Inline/launder.invariant.group.ll
    llvm/test/Transforms/Inline/memprof_inline.ll
    llvm/test/Transforms/Inline/memprof_inline2.ll
    llvm/test/Transforms/Inline/module-inlining.ll
    llvm/test/Transforms/Inline/monster_scc.ll
    llvm/test/Transforms/Inline/nested-inline.ll
    llvm/test/Transforms/Inline/no-inline-line-tables.ll
    llvm/test/Transforms/Inline/no-unwind-inline-asm.ll
    llvm/test/Transforms/Inline/noalias-calls-always.ll
    llvm/test/Transforms/Inline/noalias-calls.ll
    llvm/test/Transforms/Inline/noalias-calls2.ll
    llvm/test/Transforms/Inline/noalias-cs.ll
    llvm/test/Transforms/Inline/noalias.ll
    llvm/test/Transforms/Inline/noalias2.ll
    llvm/test/Transforms/Inline/noalias3.ll
    llvm/test/Transforms/Inline/noinline-recursive-fn.ll
    llvm/test/Transforms/Inline/nonnull.ll
    llvm/test/Transforms/Inline/optimization-remarks.ll
    llvm/test/Transforms/Inline/parallel-loop-md-callee.ll
    llvm/test/Transforms/Inline/parallel-loop-md-merge.ll
    llvm/test/Transforms/Inline/parallel-loop-md.ll
    llvm/test/Transforms/Inline/partial-inline-act.ll
    llvm/test/Transforms/Inline/pr21206.ll
    llvm/test/Transforms/Inline/pr26698.ll
    llvm/test/Transforms/Inline/pr48209.ll
    llvm/test/Transforms/Inline/pr50270.ll
    llvm/test/Transforms/Inline/pr50589.ll
    llvm/test/Transforms/Inline/pr53206.ll
    llvm/test/Transforms/Inline/prof-update-instr.ll
    llvm/test/Transforms/Inline/prof-update-sample-alwaysinline.ll
    llvm/test/Transforms/Inline/prof-update-sample.ll
    llvm/test/Transforms/Inline/profile_meta_invoke.ll
    llvm/test/Transforms/Inline/ptr-diff.ll
    llvm/test/Transforms/Inline/recursive.ll
    llvm/test/Transforms/Inline/redundant-loads.ll
    llvm/test/Transforms/Inline/ret_attr_update.ll
    llvm/test/Transforms/Inline/store-sroa.ll
    llvm/test/Transforms/Inline/unwind-inline-asm.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/Inline/2003-09-14-InlineValue.ll b/llvm/test/Transforms/Inline/2003-09-14-InlineValue.ll
index 9f2dd57cdf483..07a4244d3a3a0 100644
--- a/llvm/test/Transforms/Inline/2003-09-14-InlineValue.ll
+++ b/llvm/test/Transforms/Inline/2003-09-14-InlineValue.ll
@@ -8,7 +8,7 @@ define internal i32 @Callee() {
         ret i32 %J
 }
 
-define i32 @Caller() personality i32 (...)* @__gxx_personality_v0 {
+define i32 @Caller() personality ptr @__gxx_personality_v0 {
         %V = invoke i32 @Callee( )
                         to label %Ok unwind label %Bad          ; <i32> [#uses=1]
 
@@ -16,7 +16,7 @@ Ok:             ; preds = %0
         ret i32 %V
 
 Bad:            ; preds = %0
-        %exn = landingpad {i8*, i32}
+        %exn = landingpad {ptr, i32}
                  cleanup
         ret i32 0
 }

diff  --git a/llvm/test/Transforms/Inline/2003-09-22-PHINodeInlineFail.ll b/llvm/test/Transforms/Inline/2003-09-22-PHINodeInlineFail.ll
index 1a2085d4d6a1e..37129eb13da55 100644
--- a/llvm/test/Transforms/Inline/2003-09-22-PHINodeInlineFail.ll
+++ b/llvm/test/Transforms/Inline/2003-09-22-PHINodeInlineFail.ll
@@ -1,6 +1,6 @@
 ; RUN: opt < %s -passes=inline -disable-output
 
-define i32 @main() personality i32 (...)* @__gxx_personality_v0 {
+define i32 @main() personality ptr @__gxx_personality_v0 {
 entry:
         invoke void @__main( )
                         to label %LongJmpBlkPost unwind label %LongJmpBlkPre
@@ -10,7 +10,7 @@ LongJmpBlkPost:
 
 LongJmpBlkPre:
         %i.3 = phi i32 [ 0, %entry ]
-        %exn = landingpad {i8*, i32}
+        %exn = landingpad {ptr, i32}
                  cleanup
         ret i32 0
 }

diff  --git a/llvm/test/Transforms/Inline/2003-09-22-PHINodesInExceptionDest.ll b/llvm/test/Transforms/Inline/2003-09-22-PHINodesInExceptionDest.ll
index 038ba8e89798b..4457bdb4b03e3 100644
--- a/llvm/test/Transforms/Inline/2003-09-22-PHINodesInExceptionDest.ll
+++ b/llvm/test/Transforms/Inline/2003-09-22-PHINodesInExceptionDest.ll
@@ -1,6 +1,6 @@
 ; RUN: opt < %s -passes=inline -disable-output
 
-define i32 @main() personality i32 (...)* @__gxx_personality_v0 {
+define i32 @main() personality ptr @__gxx_personality_v0 {
 entry:
         invoke void @__main( )
                         to label %Call2Invoke unwind label %LongJmpBlkPre
@@ -10,7 +10,7 @@ Call2Invoke:            ; preds = %entry
 
 LongJmpBlkPre:          ; preds = %Call2Invoke, %entry
         %i.3 = phi i32 [ 0, %entry ]
-        %exn = landingpad {i8*, i32}
+        %exn = landingpad {ptr, i32}
                  cleanup
         br label %exit
 

diff  --git a/llvm/test/Transforms/Inline/2003-09-22-PHINodesInNormalInvokeDest.ll b/llvm/test/Transforms/Inline/2003-09-22-PHINodesInNormalInvokeDest.ll
index 60fe0fea25db0..e7f15f94c512f 100644
--- a/llvm/test/Transforms/Inline/2003-09-22-PHINodesInNormalInvokeDest.ll
+++ b/llvm/test/Transforms/Inline/2003-09-22-PHINodesInNormalInvokeDest.ll
@@ -1,6 +1,6 @@
 ; RUN: opt < %s -passes=inline -disable-output
 
-define i32 @main() personality i32 (...)* @__gxx_personality_v0 {
+define i32 @main() personality ptr @__gxx_personality_v0 {
 entry:
         invoke void @__main( )
                         to label %else unwind label %RethrowExcept
@@ -13,7 +13,7 @@ LJDecisionBB:           ; preds = %else
         br label %else
 
 RethrowExcept:          ; preds = %entry
-        %exn = landingpad {i8*, i32}
+        %exn = landingpad {ptr, i32}
                  cleanup
         ret i32 0
 }

diff  --git a/llvm/test/Transforms/Inline/2003-10-13-AllocaDominanceProblem.ll b/llvm/test/Transforms/Inline/2003-10-13-AllocaDominanceProblem.ll
index f071d0bf49c5f..d6e3a51d859b9 100644
--- a/llvm/test/Transforms/Inline/2003-10-13-AllocaDominanceProblem.ll
+++ b/llvm/test/Transforms/Inline/2003-10-13-AllocaDominanceProblem.ll
@@ -11,9 +11,9 @@ A:              ; preds = %reloadentry
 
 define internal void @callee() {
 entry:
-        %X = alloca i8, i32 0           ; <i8*> [#uses=0]
+        %X = alloca i8, i32 0           ; <ptr> [#uses=0]
         %Y = bitcast i32 0 to i32               ; <i32> [#uses=1]
-        %Z = alloca i8, i32 %Y          ; <i8*> [#uses=0]
+        %Z = alloca i8, i32 %Y          ; <ptr> [#uses=0]
         ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/2007-04-15-InlineEH.ll b/llvm/test/Transforms/Inline/2007-04-15-InlineEH.ll
index ea7c74aa103cb..bc5fe13e7f1d6 100644
--- a/llvm/test/Transforms/Inline/2007-04-15-InlineEH.ll
+++ b/llvm/test/Transforms/Inline/2007-04-15-InlineEH.ll
@@ -15,13 +15,13 @@ declare void @c()
 define void @f() {
 ; CHECK-LABEL: define void @f()
 entry:
-  call void asm "rdtsc\0A\09movl %eax, $0\0A\09movl %edx, $1", "=*imr,=*imr,~{dirflag},~{fpsr},~{flags},~{dx},~{ax}"( i32* elementtype( i32) null, i32* elementtype(i32) null ) nounwind
+  call void asm "rdtsc\0A\09movl %eax, $0\0A\09movl %edx, $1", "=*imr,=*imr,~{dirflag},~{fpsr},~{flags},~{dx},~{ax}"( ptr elementtype( i32) null, ptr elementtype(i32) null ) nounwind
 ; CHECK: call void asm
   unreachable
 }
 
-define void @g() personality i32 (...)* @__gxx_personality_v0 {
-; CHECK-LABEL: define void @g() personality i32 (...)* @__gxx_personality_v0
+define void @g() personality ptr @__gxx_personality_v0 {
+; CHECK-LABEL: define void @g() personality ptr @__gxx_personality_v0
 entry:
   invoke void @a() to label %invcont1 unwind label %cleanup
 ; CHECK-NOT: {{call|invoke}}
@@ -47,12 +47,12 @@ invcont4:
   ret void
 
 cleanup:
-  %ex = landingpad {i8*, i32} cleanup
-  resume { i8*, i32 } %ex
+  %ex = landingpad {ptr, i32} cleanup
+  resume { ptr, i32 } %ex
 }
 
 define void @h() {
-; CHECK-LABEL: define void @h() personality i32 (...)* @__gxx_personality_v0
+; CHECK-LABEL: define void @h() personality ptr @__gxx_personality_v0
 entry:
   call void @g()
 ; CHECK-NOT: {{call|invoke}}

diff  --git a/llvm/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll b/llvm/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll
index c61419e612159..12e1f112573f4 100644
--- a/llvm/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll
+++ b/llvm/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll
@@ -2,26 +2,26 @@
 ; RUN: opt < %s -passes='cgscc(inline)' -S | FileCheck %s
 ; Do not inline calls with variable-sized alloca.
 
-@q = common global i8* null
+@q = common global ptr null
 
-define i8* @a(i32 %i) nounwind {
-; CHECK-LABEL: define i8* @a
+define ptr @a(i32 %i) nounwind {
+; CHECK-LABEL: define ptr @a
 entry:
   %i_addr = alloca i32
-  %retval = alloca i8*
-  %p = alloca i8*
+  %retval = alloca ptr
+  %p = alloca ptr
   %"alloca point" = bitcast i32 0 to i32
-  store i32 %i, i32* %i_addr
-  %0 = load i32, i32* %i_addr, align 4
+  store i32 %i, ptr %i_addr
+  %0 = load i32, ptr %i_addr, align 4
   %1 = alloca i8, i32 %0
-  store i8* %1, i8** %p, align 4
-  %2 = load i8*, i8** %p, align 4
-  store i8* %2, i8** @q, align 4
+  store ptr %1, ptr %p, align 4
+  %2 = load ptr, ptr %p, align 4
+  store ptr %2, ptr @q, align 4
   br label %return
 
 return:
-  %retval1 = load i8*, i8** %retval
-  ret i8* %retval1
+  %retval1 = load ptr, ptr %retval
+  ret ptr %retval1
 }
 
 define void @b(i32 %i) nounwind {
@@ -29,10 +29,10 @@ define void @b(i32 %i) nounwind {
 entry:
   %i_addr = alloca i32
   %"alloca point" = bitcast i32 0 to i32
-  store i32 %i, i32* %i_addr
-  %0 = load i32, i32* %i_addr, align 4
-  %1 = call i8* @a(i32 %0) nounwind
-; CHECK: call i8* @a
+  store i32 %i, ptr %i_addr
+  %0 = load i32, ptr %i_addr, align 4
+  %1 = call ptr @a(i32 %0) nounwind
+; CHECK: call ptr @a
   br label %return
 
 return:

diff  --git a/llvm/test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll b/llvm/test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll
index cb7b67ca98f94..bec8033c6e8f4 100644
--- a/llvm/test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll
+++ b/llvm/test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll
@@ -2,15 +2,15 @@
 ; ModuleID = '<stdin>'
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "i386-apple-darwin9.6"
-	%struct.quad_struct = type { i32, i32, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct* }
-@NumNodes = external global i32		; <i32*> [#uses=0]
-@"\01LC" = external constant [43 x i8]		; <[43 x i8]*> [#uses=0]
-@"\01LC1" = external constant [19 x i8]		; <[19 x i8]*> [#uses=0]
-@"\01LC2" = external constant [17 x i8]		; <[17 x i8]*> [#uses=0]
+	%struct.quad_struct = type { i32, i32, ptr, ptr, ptr, ptr, ptr }
+@NumNodes = external global i32		; <ptr> [#uses=0]
+@"\01LC" = external constant [43 x i8]		; <ptr> [#uses=0]
+@"\01LC1" = external constant [19 x i8]		; <ptr> [#uses=0]
+@"\01LC2" = external constant [17 x i8]		; <ptr> [#uses=0]
 
-declare i32 @dealwithargs(i32, i8** nocapture) nounwind
+declare i32 @dealwithargs(i32, ptr nocapture) nounwind
 
-declare i32 @atoi(i8*)
+declare i32 @atoi(ptr)
 
 define internal fastcc i32 @adj(i32 %d, i32 %ct) nounwind readnone {
 entry:
@@ -63,9 +63,9 @@ return:		; preds = %bb15, %bb15, %bb10, %bb10, %bb5, %bb5, %bb, %bb, %entry
 
 declare fastcc i32 @reflect(i32, i32) nounwind readnone
 
-declare i32 @CountTree(%struct.quad_struct* nocapture) nounwind readonly
+declare i32 @CountTree(ptr nocapture) nounwind readonly
 
-define internal fastcc %struct.quad_struct* @child(%struct.quad_struct* nocapture %tree, i32 %ct) nounwind readonly {
+define internal fastcc ptr @child(ptr nocapture %tree, i32 %ct) nounwind readonly {
 entry:
 	switch i32 %ct, label %bb5 [
 		i32 0, label %bb1
@@ -75,36 +75,36 @@ entry:
 	]
 
 bb:		; preds = %entry
-	%0 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 3		; <%struct.quad_struct**> [#uses=1]
-	%1 = load %struct.quad_struct*, %struct.quad_struct** %0, align 4		; <%struct.quad_struct*> [#uses=1]
-	ret %struct.quad_struct* %1
+	%0 = getelementptr %struct.quad_struct, ptr %tree, i32 0, i32 3		; <ptr> [#uses=1]
+	%1 = load ptr, ptr %0, align 4		; <ptr> [#uses=1]
+	ret ptr %1
 
 bb1:		; preds = %entry
-	%2 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 2		; <%struct.quad_struct**> [#uses=1]
-	%3 = load %struct.quad_struct*, %struct.quad_struct** %2, align 4		; <%struct.quad_struct*> [#uses=1]
-	ret %struct.quad_struct* %3
+	%2 = getelementptr %struct.quad_struct, ptr %tree, i32 0, i32 2		; <ptr> [#uses=1]
+	%3 = load ptr, ptr %2, align 4		; <ptr> [#uses=1]
+	ret ptr %3
 
 bb2:		; preds = %entry
-	%4 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 5		; <%struct.quad_struct**> [#uses=1]
-	%5 = load %struct.quad_struct*, %struct.quad_struct** %4, align 4		; <%struct.quad_struct*> [#uses=1]
-	ret %struct.quad_struct* %5
+	%4 = getelementptr %struct.quad_struct, ptr %tree, i32 0, i32 5		; <ptr> [#uses=1]
+	%5 = load ptr, ptr %4, align 4		; <ptr> [#uses=1]
+	ret ptr %5
 
 bb3:		; preds = %entry
-	%6 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 4		; <%struct.quad_struct**> [#uses=1]
-	%7 = load %struct.quad_struct*, %struct.quad_struct** %6, align 4		; <%struct.quad_struct*> [#uses=1]
-	ret %struct.quad_struct* %7
+	%6 = getelementptr %struct.quad_struct, ptr %tree, i32 0, i32 4		; <ptr> [#uses=1]
+	%7 = load ptr, ptr %6, align 4		; <ptr> [#uses=1]
+	ret ptr %7
 
 bb5:		; preds = %entry
-	ret %struct.quad_struct* null
+	ret ptr null
 }
 
-define internal fastcc %struct.quad_struct* @gtequal_adj_neighbor(%struct.quad_struct* nocapture %tree, i32 %d) nounwind readonly {
+define internal fastcc ptr @gtequal_adj_neighbor(ptr nocapture %tree, i32 %d) nounwind readonly {
 entry:
-	%0 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 6		; <%struct.quad_struct**> [#uses=1]
-	%1 = load %struct.quad_struct*, %struct.quad_struct** %0, align 4		; <%struct.quad_struct*> [#uses=4]
-	%2 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 1		; <i32*> [#uses=1]
-	%3 = load i32, i32* %2, align 4		; <i32> [#uses=2]
-	%4 = icmp eq %struct.quad_struct* %1, null		; <i1> [#uses=1]
+	%0 = getelementptr %struct.quad_struct, ptr %tree, i32 0, i32 6		; <ptr> [#uses=1]
+	%1 = load ptr, ptr %0, align 4		; <ptr> [#uses=4]
+	%2 = getelementptr %struct.quad_struct, ptr %tree, i32 0, i32 1		; <ptr> [#uses=1]
+	%3 = load i32, ptr %2, align 4		; <i32> [#uses=2]
+	%4 = icmp eq ptr %1, null		; <i1> [#uses=1]
 	br i1 %4, label %bb3, label %bb
 
 bb:		; preds = %entry
@@ -113,96 +113,96 @@ bb:		; preds = %entry
 	br i1 %6, label %bb3, label %bb1
 
 bb1:		; preds = %bb
-	%7 = call fastcc %struct.quad_struct* @gtequal_adj_neighbor(%struct.quad_struct* %1, i32 %d) nounwind		; <%struct.quad_struct*> [#uses=1]
+	%7 = call fastcc ptr @gtequal_adj_neighbor(ptr %1, i32 %d) nounwind		; <ptr> [#uses=1]
 	br label %bb3
 
 bb3:		; preds = %bb1, %bb, %entry
-	%q.0 = phi %struct.quad_struct* [ %7, %bb1 ], [ %1, %bb ], [ %1, %entry ]		; <%struct.quad_struct*> [#uses=4]
-	%8 = icmp eq %struct.quad_struct* %q.0, null		; <i1> [#uses=1]
+	%q.0 = phi ptr [ %7, %bb1 ], [ %1, %bb ], [ %1, %entry ]		; <ptr> [#uses=4]
+	%8 = icmp eq ptr %q.0, null		; <i1> [#uses=1]
 	br i1 %8, label %bb7, label %bb4
 
 bb4:		; preds = %bb3
-	%9 = getelementptr %struct.quad_struct, %struct.quad_struct* %q.0, i32 0, i32 0		; <i32*> [#uses=1]
-	%10 = load i32, i32* %9, align 4		; <i32> [#uses=1]
+	%9 = getelementptr %struct.quad_struct, ptr %q.0, i32 0, i32 0		; <ptr> [#uses=1]
+	%10 = load i32, ptr %9, align 4		; <i32> [#uses=1]
 	%11 = icmp eq i32 %10, 2		; <i1> [#uses=1]
 	br i1 %11, label %bb5, label %bb7
 
 bb5:		; preds = %bb4
 	%12 = call fastcc i32 @reflect(i32 %d, i32 %3) nounwind		; <i32> [#uses=1]
-	%13 = call fastcc %struct.quad_struct* @child(%struct.quad_struct* %q.0, i32 %12) nounwind		; <%struct.quad_struct*> [#uses=1]
-	ret %struct.quad_struct* %13
+	%13 = call fastcc ptr @child(ptr %q.0, i32 %12) nounwind		; <ptr> [#uses=1]
+	ret ptr %13
 
 bb7:		; preds = %bb4, %bb3
-	ret %struct.quad_struct* %q.0
+	ret ptr %q.0
 }
 
-declare fastcc i32 @sum_adjacent(%struct.quad_struct* nocapture, i32, i32, i32) nounwind readonly
+declare fastcc i32 @sum_adjacent(ptr nocapture, i32, i32, i32) nounwind readonly
 
-define i32 @perimeter(%struct.quad_struct* nocapture %tree, i32 %size) nounwind readonly {
+define i32 @perimeter(ptr nocapture %tree, i32 %size) nounwind readonly {
 entry:
-	%0 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 0		; <i32*> [#uses=1]
-	%1 = load i32, i32* %0, align 4		; <i32> [#uses=1]
+	%0 = getelementptr %struct.quad_struct, ptr %tree, i32 0, i32 0		; <ptr> [#uses=1]
+	%1 = load i32, ptr %0, align 4		; <i32> [#uses=1]
 	%2 = icmp eq i32 %1, 2		; <i1> [#uses=1]
 	br i1 %2, label %bb, label %bb2
 
 bb:		; preds = %entry
-	%3 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 4		; <%struct.quad_struct**> [#uses=1]
-	%4 = load %struct.quad_struct*, %struct.quad_struct** %3, align 4		; <%struct.quad_struct*> [#uses=1]
+	%3 = getelementptr %struct.quad_struct, ptr %tree, i32 0, i32 4		; <ptr> [#uses=1]
+	%4 = load ptr, ptr %3, align 4		; <ptr> [#uses=1]
 	%5 = sdiv i32 %size, 2		; <i32> [#uses=1]
-	%6 = call i32 @perimeter(%struct.quad_struct* %4, i32 %5) nounwind		; <i32> [#uses=1]
-	%7 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 5		; <%struct.quad_struct**> [#uses=1]
-	%8 = load %struct.quad_struct*, %struct.quad_struct** %7, align 4		; <%struct.quad_struct*> [#uses=1]
+	%6 = call i32 @perimeter(ptr %4, i32 %5) nounwind		; <i32> [#uses=1]
+	%7 = getelementptr %struct.quad_struct, ptr %tree, i32 0, i32 5		; <ptr> [#uses=1]
+	%8 = load ptr, ptr %7, align 4		; <ptr> [#uses=1]
 	%9 = sdiv i32 %size, 2		; <i32> [#uses=1]
-	%10 = call i32 @perimeter(%struct.quad_struct* %8, i32 %9) nounwind		; <i32> [#uses=1]
+	%10 = call i32 @perimeter(ptr %8, i32 %9) nounwind		; <i32> [#uses=1]
 	%11 = add i32 %10, %6		; <i32> [#uses=1]
-	%12 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 3		; <%struct.quad_struct**> [#uses=1]
-	%13 = load %struct.quad_struct*, %struct.quad_struct** %12, align 4		; <%struct.quad_struct*> [#uses=1]
+	%12 = getelementptr %struct.quad_struct, ptr %tree, i32 0, i32 3		; <ptr> [#uses=1]
+	%13 = load ptr, ptr %12, align 4		; <ptr> [#uses=1]
 	%14 = sdiv i32 %size, 2		; <i32> [#uses=1]
-	%15 = call i32 @perimeter(%struct.quad_struct* %13, i32 %14) nounwind		; <i32> [#uses=1]
+	%15 = call i32 @perimeter(ptr %13, i32 %14) nounwind		; <i32> [#uses=1]
 	%16 = add i32 %15, %11		; <i32> [#uses=1]
-	%17 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 2		; <%struct.quad_struct**> [#uses=1]
-	%18 = load %struct.quad_struct*, %struct.quad_struct** %17, align 4		; <%struct.quad_struct*> [#uses=1]
+	%17 = getelementptr %struct.quad_struct, ptr %tree, i32 0, i32 2		; <ptr> [#uses=1]
+	%18 = load ptr, ptr %17, align 4		; <ptr> [#uses=1]
 	%19 = sdiv i32 %size, 2		; <i32> [#uses=1]
-	%20 = call i32 @perimeter(%struct.quad_struct* %18, i32 %19) nounwind		; <i32> [#uses=1]
+	%20 = call i32 @perimeter(ptr %18, i32 %19) nounwind		; <i32> [#uses=1]
 	%21 = add i32 %20, %16		; <i32> [#uses=1]
 	ret i32 %21
 
 bb2:		; preds = %entry
-	%22 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 0		; <i32*> [#uses=1]
-	%23 = load i32, i32* %22, align 4		; <i32> [#uses=1]
+	%22 = getelementptr %struct.quad_struct, ptr %tree, i32 0, i32 0		; <ptr> [#uses=1]
+	%23 = load i32, ptr %22, align 4		; <i32> [#uses=1]
 	%24 = icmp eq i32 %23, 0		; <i1> [#uses=1]
 	br i1 %24, label %bb3, label %bb23
 
 bb3:		; preds = %bb2
-	%25 = call fastcc %struct.quad_struct* @gtequal_adj_neighbor(%struct.quad_struct* %tree, i32 0) nounwind		; <%struct.quad_struct*> [#uses=4]
-	%26 = icmp eq %struct.quad_struct* %25, null		; <i1> [#uses=1]
+	%25 = call fastcc ptr @gtequal_adj_neighbor(ptr %tree, i32 0) nounwind		; <ptr> [#uses=4]
+	%26 = icmp eq ptr %25, null		; <i1> [#uses=1]
 	br i1 %26, label %bb8, label %bb4
 
 bb4:		; preds = %bb3
-	%27 = getelementptr %struct.quad_struct, %struct.quad_struct* %25, i32 0, i32 0		; <i32*> [#uses=1]
-	%28 = load i32, i32* %27, align 4		; <i32> [#uses=1]
+	%27 = getelementptr %struct.quad_struct, ptr %25, i32 0, i32 0		; <ptr> [#uses=1]
+	%28 = load i32, ptr %27, align 4		; <i32> [#uses=1]
 	%29 = icmp eq i32 %28, 1		; <i1> [#uses=1]
 	br i1 %29, label %bb8, label %bb6
 
 bb6:		; preds = %bb4
-	%30 = getelementptr %struct.quad_struct, %struct.quad_struct* %25, i32 0, i32 0		; <i32*> [#uses=1]
-	%31 = load i32, i32* %30, align 4		; <i32> [#uses=1]
+	%30 = getelementptr %struct.quad_struct, ptr %25, i32 0, i32 0		; <ptr> [#uses=1]
+	%31 = load i32, ptr %30, align 4		; <i32> [#uses=1]
 	%32 = icmp eq i32 %31, 2		; <i1> [#uses=1]
 	br i1 %32, label %bb7, label %bb8
 
 bb7:		; preds = %bb6
-	%33 = call fastcc i32 @sum_adjacent(%struct.quad_struct* %25, i32 3, i32 2, i32 %size) nounwind		; <i32> [#uses=1]
+	%33 = call fastcc i32 @sum_adjacent(ptr %25, i32 3, i32 2, i32 %size) nounwind		; <i32> [#uses=1]
 	br label %bb8
 
 bb8:		; preds = %bb7, %bb6, %bb4, %bb3
 	%retval1.1 = phi i32 [ 0, %bb6 ], [ %33, %bb7 ], [ %size, %bb4 ], [ %size, %bb3 ]		; <i32> [#uses=3]
-	%34 = call fastcc %struct.quad_struct* @gtequal_adj_neighbor(%struct.quad_struct* %tree, i32 1) nounwind		; <%struct.quad_struct*> [#uses=4]
-	%35 = icmp eq %struct.quad_struct* %34, null		; <i1> [#uses=1]
+	%34 = call fastcc ptr @gtequal_adj_neighbor(ptr %tree, i32 1) nounwind		; <ptr> [#uses=4]
+	%35 = icmp eq ptr %34, null		; <i1> [#uses=1]
 	br i1 %35, label %bb10, label %bb9
 
 bb9:		; preds = %bb8
-	%36 = getelementptr %struct.quad_struct, %struct.quad_struct* %34, i32 0, i32 0		; <i32*> [#uses=1]
-	%37 = load i32, i32* %36, align 4		; <i32> [#uses=1]
+	%36 = getelementptr %struct.quad_struct, ptr %34, i32 0, i32 0		; <ptr> [#uses=1]
+	%37 = load i32, ptr %36, align 4		; <i32> [#uses=1]
 	%38 = icmp eq i32 %37, 1		; <i1> [#uses=1]
 	br i1 %38, label %bb10, label %bb11
 
@@ -211,25 +211,25 @@ bb10:		; preds = %bb9, %bb8
 	br label %bb13
 
 bb11:		; preds = %bb9
-	%40 = getelementptr %struct.quad_struct, %struct.quad_struct* %34, i32 0, i32 0		; <i32*> [#uses=1]
-	%41 = load i32, i32* %40, align 4		; <i32> [#uses=1]
+	%40 = getelementptr %struct.quad_struct, ptr %34, i32 0, i32 0		; <ptr> [#uses=1]
+	%41 = load i32, ptr %40, align 4		; <i32> [#uses=1]
 	%42 = icmp eq i32 %41, 2		; <i1> [#uses=1]
 	br i1 %42, label %bb12, label %bb13
 
 bb12:		; preds = %bb11
-	%43 = call fastcc i32 @sum_adjacent(%struct.quad_struct* %34, i32 2, i32 0, i32 %size) nounwind		; <i32> [#uses=1]
+	%43 = call fastcc i32 @sum_adjacent(ptr %34, i32 2, i32 0, i32 %size) nounwind		; <i32> [#uses=1]
 	%44 = add i32 %43, %retval1.1		; <i32> [#uses=1]
 	br label %bb13
 
 bb13:		; preds = %bb12, %bb11, %bb10
 	%retval1.2 = phi i32 [ %retval1.1, %bb11 ], [ %44, %bb12 ], [ %39, %bb10 ]		; <i32> [#uses=3]
-	%45 = call fastcc %struct.quad_struct* @gtequal_adj_neighbor(%struct.quad_struct* %tree, i32 2) nounwind		; <%struct.quad_struct*> [#uses=4]
-	%46 = icmp eq %struct.quad_struct* %45, null		; <i1> [#uses=1]
+	%45 = call fastcc ptr @gtequal_adj_neighbor(ptr %tree, i32 2) nounwind		; <ptr> [#uses=4]
+	%46 = icmp eq ptr %45, null		; <i1> [#uses=1]
 	br i1 %46, label %bb15, label %bb14
 
 bb14:		; preds = %bb13
-	%47 = getelementptr %struct.quad_struct, %struct.quad_struct* %45, i32 0, i32 0		; <i32*> [#uses=1]
-	%48 = load i32, i32* %47, align 4		; <i32> [#uses=1]
+	%47 = getelementptr %struct.quad_struct, ptr %45, i32 0, i32 0		; <ptr> [#uses=1]
+	%48 = load i32, ptr %47, align 4		; <i32> [#uses=1]
 	%49 = icmp eq i32 %48, 1		; <i1> [#uses=1]
 	br i1 %49, label %bb15, label %bb16
 
@@ -238,25 +238,25 @@ bb15:		; preds = %bb14, %bb13
 	br label %bb18
 
 bb16:		; preds = %bb14
-	%51 = getelementptr %struct.quad_struct, %struct.quad_struct* %45, i32 0, i32 0		; <i32*> [#uses=1]
-	%52 = load i32, i32* %51, align 4		; <i32> [#uses=1]
+	%51 = getelementptr %struct.quad_struct, ptr %45, i32 0, i32 0		; <ptr> [#uses=1]
+	%52 = load i32, ptr %51, align 4		; <i32> [#uses=1]
 	%53 = icmp eq i32 %52, 2		; <i1> [#uses=1]
 	br i1 %53, label %bb17, label %bb18
 
 bb17:		; preds = %bb16
-	%54 = call fastcc i32 @sum_adjacent(%struct.quad_struct* %45, i32 0, i32 1, i32 %size) nounwind		; <i32> [#uses=1]
+	%54 = call fastcc i32 @sum_adjacent(ptr %45, i32 0, i32 1, i32 %size) nounwind		; <i32> [#uses=1]
 	%55 = add i32 %54, %retval1.2		; <i32> [#uses=1]
 	br label %bb18
 
 bb18:		; preds = %bb17, %bb16, %bb15
 	%retval1.3 = phi i32 [ %retval1.2, %bb16 ], [ %55, %bb17 ], [ %50, %bb15 ]		; <i32> [#uses=3]
-	%56 = call fastcc %struct.quad_struct* @gtequal_adj_neighbor(%struct.quad_struct* %tree, i32 3) nounwind		; <%struct.quad_struct*> [#uses=4]
-	%57 = icmp eq %struct.quad_struct* %56, null		; <i1> [#uses=1]
+	%56 = call fastcc ptr @gtequal_adj_neighbor(ptr %tree, i32 3) nounwind		; <ptr> [#uses=4]
+	%57 = icmp eq ptr %56, null		; <i1> [#uses=1]
 	br i1 %57, label %bb20, label %bb19
 
 bb19:		; preds = %bb18
-	%58 = getelementptr %struct.quad_struct, %struct.quad_struct* %56, i32 0, i32 0		; <i32*> [#uses=1]
-	%59 = load i32, i32* %58, align 4		; <i32> [#uses=1]
+	%58 = getelementptr %struct.quad_struct, ptr %56, i32 0, i32 0		; <ptr> [#uses=1]
+	%59 = load i32, ptr %58, align 4		; <i32> [#uses=1]
 	%60 = icmp eq i32 %59, 1		; <i1> [#uses=1]
 	br i1 %60, label %bb20, label %bb21
 
@@ -265,13 +265,13 @@ bb20:		; preds = %bb19, %bb18
 	ret i32 %61
 
 bb21:		; preds = %bb19
-	%62 = getelementptr %struct.quad_struct, %struct.quad_struct* %56, i32 0, i32 0		; <i32*> [#uses=1]
-	%63 = load i32, i32* %62, align 4		; <i32> [#uses=1]
+	%62 = getelementptr %struct.quad_struct, ptr %56, i32 0, i32 0		; <ptr> [#uses=1]
+	%63 = load i32, ptr %62, align 4		; <i32> [#uses=1]
 	%64 = icmp eq i32 %63, 2		; <i1> [#uses=1]
 	br i1 %64, label %bb22, label %bb23
 
 bb22:		; preds = %bb21
-	%65 = call fastcc i32 @sum_adjacent(%struct.quad_struct* %56, i32 1, i32 3, i32 %size) nounwind		; <i32> [#uses=1]
+	%65 = call fastcc i32 @sum_adjacent(ptr %56, i32 1, i32 3, i32 %size) nounwind		; <i32> [#uses=1]
 	%66 = add i32 %65, %retval1.3		; <i32> [#uses=1]
 	ret i32 %66
 
@@ -280,9 +280,9 @@ bb23:		; preds = %bb21, %bb2
 	ret i32 %retval1.0
 }
 
-declare i32 @main(i32, i8** nocapture) noreturn nounwind
+declare i32 @main(i32, ptr nocapture) noreturn nounwind
 
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind
 
 declare void @exit(i32) noreturn nounwind
 
@@ -290,4 +290,4 @@ declare fastcc i32 @CheckOutside(i32, i32) nounwind readnone
 
 declare fastcc i32 @CheckIntersect(i32, i32, i32) nounwind readnone
 
-declare %struct.quad_struct* @MakeTree(i32, i32, i32, i32, i32, %struct.quad_struct*, i32, i32) nounwind
+declare ptr @MakeTree(i32, i32, i32, i32, i32, ptr, i32, i32) nounwind

diff  --git a/llvm/test/Transforms/Inline/2009-05-07-CallUsingSelfCrash.ll b/llvm/test/Transforms/Inline/2009-05-07-CallUsingSelfCrash.ll
index 13725924c0c27..9f60338ec75c2 100644
--- a/llvm/test/Transforms/Inline/2009-05-07-CallUsingSelfCrash.ll
+++ b/llvm/test/Transforms/Inline/2009-05-07-CallUsingSelfCrash.ll
@@ -4,7 +4,7 @@
 	%struct.S1 = type <{ i8, i8, i8, i8, %struct.S0 }>
 	%struct.S2 = type <{ %struct.S1, i32 }>
 
-define void @func_113(%struct.S1* noalias nocapture sret(%struct.S1) %agg.result, i8 signext %p_114) noreturn nounwind {
+define void @func_113(ptr noalias nocapture sret(%struct.S1) %agg.result, i8 signext %p_114) noreturn nounwind {
 entry:
 	unreachable
 

diff  --git a/llvm/test/Transforms/Inline/AArch64/binop.ll b/llvm/test/Transforms/Inline/AArch64/binop.ll
index c575bbcf08e17..eb882282820b8 100644
--- a/llvm/test/Transforms/Inline/AArch64/binop.ll
+++ b/llvm/test/Transforms/Inline/AArch64/binop.ll
@@ -23,7 +23,7 @@ define i32 @outer_add2(i32 %a) {
 define i32 @add(i32 %a, i32 %b) {
   %add = add i32 %a, %b
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i32 %add
 }
 
@@ -39,7 +39,7 @@ define i32 @outer_sub1(i32 %a) {
 define i32 @sub1(i32 %a, i32 %b) {
   %sub = sub i32 %a, %b
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i32 %sub
 }
 
@@ -76,7 +76,7 @@ define i32 @outer_mul2(i32 %a) {
 define i32 @mul(i32 %a, i32 %b) {
   %mul = mul i32 %a, %b
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i32 %mul
 }
 
@@ -99,7 +99,7 @@ define i32 @outer_div2(i32 %a) {
 define i32 @div1(i32 %a, i32 %b) {
   %div = sdiv i32 %a, %b
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i32 %div
 }
 
@@ -136,7 +136,7 @@ define i32 @outer_rem2(i32 %a) {
 define i32 @rem1(i32 %a, i32 %b) {
   %rem = urem i32 %a, %b
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i32 %rem
 }
 
@@ -166,7 +166,7 @@ define i32 @outer_shl1(i32 %a) {
 define i32 @shl(i32 %a, i32 %b) {
   %shl = shl i32 %a, %b
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i32 %shl
 }
 
@@ -182,7 +182,7 @@ define i32 @outer_shr1(i32 %a) {
 define i32 @shr(i32 %a, i32 %b) {
   %shr = ashr i32 %a, %b
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i32 %shr
 }
 
@@ -205,7 +205,7 @@ define i1 @outer_and2(i1 %a) {
 define i1 @and1(i1 %a, i1 %b) {
   %and = and i1 %a, %b
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i1 %and
 }
 
@@ -242,7 +242,7 @@ define i1 @outer_or2(i1 %a) {
 define i1 @or1(i1 %a, i1 %b) {
   %or = or i1 %a, %b
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i1 %or
 }
 
@@ -272,7 +272,7 @@ define i1 @outer_xor1(i1 %a) {
 define i1 @xor1(i1 %a, i1 %b) {
   %xor = xor i1 %a, %b
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i1 %xor
 }
 

diff  --git a/llvm/test/Transforms/Inline/AArch64/gep-cost.ll b/llvm/test/Transforms/Inline/AArch64/gep-cost.ll
index 929d3c60cbd22..9603113082604 100644
--- a/llvm/test/Transforms/Inline/AArch64/gep-cost.ll
+++ b/llvm/test/Transforms/Inline/AArch64/gep-cost.ll
@@ -4,18 +4,18 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-gnu"
 
-define void @outer1([4 x i32]* %ptr, i32 %i) {
-  call void @inner1([4 x i32]* %ptr, i32 %i)
+define void @outer1(ptr %ptr, i32 %i) {
+  call void @inner1(ptr %ptr, i32 %i)
   ret void
 }
 
-define void @outer2([4 x i32]* %ptr, i32 %i) {
-  call void @inner2([4 x i32]* %ptr, i32 %i)
+define void @outer2(ptr %ptr, i32 %i) {
+  call void @inner2(ptr %ptr, i32 %i)
   ret void
 }
 
-define void @outer3([4 x i32]* %ptr, i32 %j) {
-  call void @inner3([4 x i32]* %ptr, i32 0, i32 %j)
+define void @outer3(ptr %ptr, i32 %j) {
+  call void @inner3(ptr %ptr, i32 0, i32 %j)
   ret void
 }
 
@@ -24,8 +24,8 @@ define void @outer3([4 x i32]* %ptr, i32 %j) {
 ; CHECK: Analyzing call of inner1
 ; CHECK: NumInstructionsSimplified: 2
 ; CHECK: NumInstructions: 2
-define void @inner1([4 x i32]* %ptr, i32 %i) {
-  %G = getelementptr inbounds [4 x i32], [4 x i32]* %ptr, i32 0, i32 %i
+define void @inner1(ptr %ptr, i32 %i) {
+  %G = getelementptr inbounds [4 x i32], ptr %ptr, i32 0, i32 %i
   ret void
 }
 
@@ -34,8 +34,8 @@ define void @inner1([4 x i32]* %ptr, i32 %i) {
 ; CHECK: Analyzing call of inner2
 ; CHECK: NumInstructionsSimplified: 1
 ; CHECK: NumInstructions: 2
-define void @inner2([4 x i32]* %ptr, i32 %i) {
-  %G = getelementptr inbounds [4 x i32], [4 x i32]* %ptr, i32 1, i32 %i
+define void @inner2(ptr %ptr, i32 %i) {
+  %G = getelementptr inbounds [4 x i32], ptr %ptr, i32 1, i32 %i
   ret void
 }
 
@@ -45,7 +45,7 @@ define void @inner2([4 x i32]* %ptr, i32 %i) {
 ; CHECK: Analyzing call of inner3
 ; CHECK: NumInstructionsSimplified: 2
 ; CHECK: NumInstructions: 2
-define void @inner3([4 x i32]* %ptr, i32 %i, i32 %j) {
-  %G = getelementptr inbounds [4 x i32], [4 x i32]* %ptr, i32 %i, i32 %j
+define void @inner3(ptr %ptr, i32 %i, i32 %j) {
+  %G = getelementptr inbounds [4 x i32], ptr %ptr, i32 %i, i32 %j
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/AArch64/phi.ll b/llvm/test/Transforms/Inline/AArch64/phi.ll
index 4439891e92542..4e4c6a4ecee2c 100644
--- a/llvm/test/Transforms/Inline/AArch64/phi.ll
+++ b/llvm/test/Transforms/Inline/AArch64/phi.ll
@@ -22,11 +22,11 @@ if_true:
   br i1 %phi, label %if_true, label %exit
 
 exit:
-  store i32 0, i32* @glbl
-  store i32 1, i32* @glbl
-  store i32 2, i32* @glbl
-  store i32 3, i32* @glbl
-  store i32 4, i32* @glbl
+  store i32 0, ptr @glbl
+  store i32 1, ptr @glbl
+  store i32 2, ptr @glbl
+  store i32 3, ptr @glbl
+  store i32 4, ptr @glbl
   ret i1 %phi
 }
 
@@ -69,11 +69,11 @@ if_true:
 exit:
   %phi = phi i32 [0, %entry], [0, %if_true] ; Simplified to 0
   %cmp = icmp eq i32 %phi, 0
-  store i32 0, i32* @glbl
-  store i32 1, i32* @glbl
-  store i32 2, i32* @glbl
-  store i32 3, i32* @glbl
-  store i32 4, i32* @glbl
+  store i32 0, ptr @glbl
+  store i32 1, ptr @glbl
+  store i32 2, ptr @glbl
+  store i32 3, ptr @glbl
+  store i32 4, ptr @glbl
   ret i1 %cmp
 }
 
@@ -126,7 +126,7 @@ exit:
   %phi = phi i32 [%val1, %entry], [%val2, %if_true] ; Can be simplified to a constant if %val1 and %val2 are the same constants
   %cmp = icmp eq i32 %phi, 0
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i1 %cmp
 }
 
@@ -149,8 +149,8 @@ exit:
   %phi = phi i32 [%val1, %entry], [%val2, %if_true] ; Simplified to 0
   %cmp = icmp eq i32 %phi, 0
   call void @pad()
-  store i32 0, i32* @glbl
-  store i32 1, i32* @glbl
+  store i32 0, ptr @glbl
+  store i32 1, ptr @glbl
   ret i1 %cmp
 }
 
@@ -173,8 +173,8 @@ exit:
   %phi = phi i32 [%val1, %entry], [%val2, %if_true] ; Simplified to 0
   %cmp = icmp eq i32 %phi, 0
   call void @pad()
-  store i32 0, i32* @glbl
-  store i32 1, i32* @glbl
+  store i32 0, ptr @glbl
+  store i32 1, ptr @glbl
   ret i1 %cmp
 }
 
@@ -253,7 +253,7 @@ exit:
   %phi = phi i32 [0, %zero], [1, %one], [2, %two_true], [2, %two_false], [-1, %entry] ; Simplified to 0
   %cmp = icmp eq i32 %phi, 0
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i1 %cmp
 }
 
@@ -262,17 +262,17 @@ define i32 @outer10(i1 %cond) {
 ; CHECK-LABEL: @outer10(
 ; CHECK-NOT: call i32 @inner10
   %A = alloca i32
-  %C = call i32 @inner10(i1 %cond, i32* %A)
+  %C = call i32 @inner10(i1 %cond, ptr %A)
   ret i32 %C
 }
 
-define i32 @inner10(i1 %cond, i32* %A) {
+define i32 @inner10(i1 %cond, ptr %A) {
 entry:
   br label %if_true
 
 if_true:
-  %phi = phi i32* [%A, %entry], [%phi, %if_true] ; Simplified to %A
-  %load = load i32, i32* %phi
+  %phi = phi ptr [%A, %entry], [%phi, %if_true] ; Simplified to %A
+  %load = load i32, ptr %phi
   br i1 %cond, label %if_true, label %exit
 
 exit:
@@ -281,20 +281,20 @@ exit:
 }
 
 
-define i32 @outer11(i1 %cond, i32* %ptr) {
+define i32 @outer11(i1 %cond, ptr %ptr) {
 ; CHECK-LABEL: @outer11(
 ; CHECK: call i32 @inner11
-  %C = call i32 @inner11(i1 %cond, i32* %ptr)
+  %C = call i32 @inner11(i1 %cond, ptr %ptr)
   ret i32 %C
 }
 
-define i32 @inner11(i1 %cond, i32* %ptr) {
+define i32 @inner11(i1 %cond, ptr %ptr) {
 entry:
   br label %if_true
 
 if_true:
-  %phi = phi i32* [%ptr, %entry], [%phi, %if_true] ; Cannot be simplified
-  %load = load i32, i32* %phi
+  %phi = phi ptr [%ptr, %entry], [%phi, %if_true] ; Cannot be simplified
+  %load = load i32, ptr %phi
   br i1 %cond, label %if_true, label %exit
 
 exit:
@@ -307,11 +307,11 @@ define i32 @outer12(i1 %cond) {
 ; CHECK-LABEL: @outer12(
 ; CHECK-NOT: call i32 @inner12
   %A = alloca i32
-  %C = call i32 @inner12(i1 %cond, i32* %A)
+  %C = call i32 @inner12(i1 %cond, ptr %A)
   ret i32 %C
 }
 
-define i32 @inner12(i1 %cond, i32* %ptr) {
+define i32 @inner12(i1 %cond, ptr %ptr) {
 entry:
   br i1 %cond, label %if_true, label %exit
 
@@ -319,8 +319,8 @@ if_true:
   br label %exit
 
 exit:
-  %phi = phi i32* [%ptr, %entry], [%ptr, %if_true] ; Simplified to %A
-  %load = load i32, i32* %phi
+  %phi = phi ptr [%ptr, %entry], [%ptr, %if_true] ; Simplified to %A
+  %load = load i32, ptr %phi
   call void @pad()
   ret i32 %load
 }
@@ -330,23 +330,23 @@ define i32 @outer13(i1 %cond) {
 ; CHECK-LABEL: @outer13(
 ; CHECK-NOT: call i32 @inner13
   %A = alloca i32
-  %C = call i32 @inner13(i1 %cond, i32* %A)
+  %C = call i32 @inner13(i1 %cond, ptr %A)
   ret i32 %C
 }
 
-define i32 @inner13(i1 %cond, i32* %ptr) {
+define i32 @inner13(i1 %cond, ptr %ptr) {
 entry:
-  %gep1 = getelementptr inbounds i32, i32* %ptr, i32 2
-  %gep2 = getelementptr inbounds i32, i32* %ptr, i32 1
+  %gep1 = getelementptr inbounds i32, ptr %ptr, i32 2
+  %gep2 = getelementptr inbounds i32, ptr %ptr, i32 1
   br i1 %cond, label %if_true, label %exit
 
 if_true:
-  %gep3 = getelementptr inbounds i32, i32* %gep2, i32 1
+  %gep3 = getelementptr inbounds i32, ptr %gep2, i32 1
   br label %exit
 
 exit:
-  %phi = phi i32* [%gep1, %entry], [%gep3, %if_true] ; Simplifeid to %gep1
-  %load = load i32, i32* %phi
+  %phi = phi ptr [%gep1, %entry], [%gep3, %if_true] ; Simplifeid to %gep1
+  %load = load i32, ptr %phi
   call void @pad()
   ret i32 %load
 }
@@ -357,11 +357,11 @@ define i32 @outer14(i1 %cond) {
 ; CHECK: call i32 @inner14
   %A1 = alloca i32
   %A2 = alloca i32
-  %C = call i32 @inner14(i1 %cond, i32* %A1, i32* %A2)
+  %C = call i32 @inner14(i1 %cond, ptr %A1, ptr %A2)
   ret i32 %C
 }
 
-define i32 @inner14(i1 %cond, i32* %ptr1, i32* %ptr2) {
+define i32 @inner14(i1 %cond, ptr %ptr1, ptr %ptr2) {
 entry:
   br i1 %cond, label %if_true, label %exit
 
@@ -369,23 +369,23 @@ if_true:
   br label %exit
 
 exit:
-  %phi = phi i32* [%ptr1, %entry], [%ptr2, %if_true] ; Cannot be simplified
-  %load = load i32, i32* %phi
+  %phi = phi ptr [%ptr1, %entry], [%ptr2, %if_true] ; Cannot be simplified
+  %load = load i32, ptr %phi
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i32 %load
 }
 
 
-define i32 @outer15(i1 %cond, i32* %ptr) {
+define i32 @outer15(i1 %cond, ptr %ptr) {
 ; CHECK-LABEL: @outer15(
 ; CHECK-NOT: call i32 @inner15
   %A = alloca i32
-  %C = call i32 @inner15(i1 true, i32* %ptr, i32* %A)
+  %C = call i32 @inner15(i1 true, ptr %ptr, ptr %A)
   ret i32 %C
 }
 
-define i32 @inner15(i1 %cond, i32* %ptr1, i32* %ptr2) {
+define i32 @inner15(i1 %cond, ptr %ptr1, ptr %ptr2) {
 entry:
   br i1 %cond, label %if_true, label %exit
 
@@ -393,24 +393,24 @@ if_true:
   br label %exit
 
 exit:
-  %phi = phi i32* [%ptr1, %entry], [%ptr2, %if_true] ; Simplified to %A
-  %load = load i32, i32* %phi
+  %phi = phi ptr [%ptr1, %entry], [%ptr2, %if_true] ; Simplified to %A
+  %load = load i32, ptr %phi
   call void @pad()
-  store i32 0, i32* @glbl
-  store i32 1, i32* @glbl
+  store i32 0, ptr @glbl
+  store i32 1, ptr @glbl
   ret i32 %load
 }
 
 
-define i32 @outer16(i1 %cond, i32* %ptr) {
+define i32 @outer16(i1 %cond, ptr %ptr) {
 ; CHECK-LABEL: @outer16(
 ; CHECK-NOT: call i32 @inner16
   %A = alloca i32
-  %C = call i32 @inner16(i1 false, i32* %A, i32* %ptr)
+  %C = call i32 @inner16(i1 false, ptr %A, ptr %ptr)
   ret i32 %C
 }
 
-define i32 @inner16(i1 %cond, i32* %ptr1, i32* %ptr2) {
+define i32 @inner16(i1 %cond, ptr %ptr1, ptr %ptr2) {
 entry:
   br i1 %cond, label %if_true, label %exit
 
@@ -418,11 +418,11 @@ if_true:
   br label %exit
 
 exit:
-  %phi = phi i32* [%ptr1, %entry], [%ptr2, %if_true] ; Simplified to %A
-  %load = load i32, i32* %phi
+  %phi = phi ptr [%ptr1, %entry], [%ptr2, %if_true] ; Simplified to %A
+  %load = load i32, ptr %phi
   call void @pad()
-  store i32 0, i32* @glbl
-  store i32 1, i32* @glbl
+  store i32 0, ptr @glbl
+  store i32 1, ptr @glbl
   ret i32 %load
 }
 
@@ -431,11 +431,11 @@ define i1 @outer17(i1 %cond) {
 ; CHECK-LABEL: @outer17(
 ; CHECK: call i1 @inner17
   %A = alloca i32
-  %C = call i1 @inner17(i1 %cond, i32* %A)
+  %C = call i1 @inner17(i1 %cond, ptr %A)
   ret i1 %C
 }
 
-define i1 @inner17(i1 %cond, i32* %ptr) {
+define i1 @inner17(i1 %cond, ptr %ptr) {
 entry:
   br i1 %cond, label %if_true, label %exit
 
@@ -443,8 +443,8 @@ if_true:
   br label %exit
 
 exit:
-  %phi = phi i32* [null, %entry], [%ptr, %if_true] ; Cannot be mapped to a constant
-  %cmp = icmp eq i32* %phi, null
+  %phi = phi ptr [null, %entry], [%ptr, %if_true] ; Cannot be mapped to a constant
+  %cmp = icmp eq ptr %phi, null
   call void @pad()
   ret i1 %cmp
 }
@@ -485,11 +485,11 @@ define i1 @outer19(i1 %cond) {
 ; CHECK-LABEL: @outer19(
 ; CHECK: call i1 @inner19
   %A = alloca i32
-  %C = call i1 @inner19(i1 %cond, i32* %A)
+  %C = call i1 @inner19(i1 %cond, ptr %A)
   ret i1 %C
 }
 
-define i1 @inner19(i1 %cond, i32* %ptr) {
+define i1 @inner19(i1 %cond, ptr %ptr) {
 entry:
   br i1 %cond, label %if_true, label %exit
 
@@ -497,8 +497,8 @@ if_true:
   br label %exit
 
 exit:
-  %phi = phi i32* [%ptr, %entry], [null, %if_true] ; Cannot be mapped to a constant
-  %cmp = icmp eq i32* %phi, null
+  %phi = phi ptr [%ptr, %entry], [null, %if_true] ; Cannot be mapped to a constant
+  %cmp = icmp eq ptr %phi, null
   call void @pad()
   ret i1 %cmp
 }

diff  --git a/llvm/test/Transforms/Inline/AArch64/select.ll b/llvm/test/Transforms/Inline/AArch64/select.ll
index 34d2bc8830f8f..d46ca0ba78278 100644
--- a/llvm/test/Transforms/Inline/AArch64/select.ll
+++ b/llvm/test/Transforms/Inline/AArch64/select.ll
@@ -16,7 +16,7 @@ define i32 @outer1(i1 %cond) {
 define i32 @inner1(i1 %cond, i32 %val) {
   %select = select i1 %cond, i32 1, i32 %val       ; Simplified to 1
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i32 %select                                  ; Simplifies to ret i32 1
 }
 
@@ -31,7 +31,7 @@ define i32 @outer2(i32 %val) {
 define i32 @inner2(i1 %cond, i32 %val) {
   %select = select i1 %cond, i32 1, i32 %val       ; Simplifies to 1
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i32 %select                                  ; Simplifies to ret i32 1
 }
 
@@ -46,7 +46,7 @@ define i32 @outer3(i32 %val) {
 define i32 @inner3(i1 %cond, i32 %val) {
   %select = select i1 %cond, i32 %val, i32 -1      ; Simplifies to -1
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i32 %select                                  ; Simplifies to ret i32 -1
 }
 
@@ -61,8 +61,8 @@ define i32 @outer4() {
 define i32 @inner4(i1 %cond, i32 %val1, i32 %val2) {
   %select = select i1 %cond, i32 %val1, i32 %val2  ; Simplifies to 1
   call void @pad()
-  store i32 0, i32* @glbl
-  store i32 1, i32* @glbl
+  store i32 0, ptr @glbl
+  store i32 1, ptr @glbl
   ret i32 %select                                  ; Simplifies to ret i32 1
 }
 
@@ -85,7 +85,7 @@ isfalse:                                           ; This block is unreachable o
   br label %exit
 
 exit:
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i1 %select                                   ; Simplifies to ret i1 true
 }
 
@@ -94,51 +94,51 @@ define i32 @outer6(i1 %cond) {
 ; CHECK-LABEL: @outer6(
 ; CHECK-NOT: call i32 @inner6
   %A = alloca i32
-  %C = call i32 @inner6(i1 %cond, i32* %A)
+  %C = call i32 @inner6(i1 %cond, ptr %A)
   ret i32 %C
 }
 
-define i32 @inner6(i1 %cond, i32* %ptr) {
-  %G1 = getelementptr inbounds i32, i32* %ptr, i32 1
-  %G2 = getelementptr inbounds i32, i32* %G1, i32 1
-  %G3 = getelementptr inbounds i32, i32* %ptr, i32 2
-  %select = select i1 %cond, i32* %G2, i32* %G3    ; Simplified to %A[2]
-  %load = load i32, i32* %select                   ; SROA'ed
+define i32 @inner6(i1 %cond, ptr %ptr) {
+  %G1 = getelementptr inbounds i32, ptr %ptr, i32 1
+  %G2 = getelementptr inbounds i32, ptr %G1, i32 1
+  %G3 = getelementptr inbounds i32, ptr %ptr, i32 2
+  %select = select i1 %cond, ptr %G2, ptr %G3    ; Simplified to %A[2]
+  %load = load i32, ptr %select                   ; SROA'ed
   call void @pad()
   ret i32 %load                                    ; Simplified
 }
 
 
-define i32 @outer7(i32* %ptr) {
+define i32 @outer7(ptr %ptr) {
 ; CHECK-LABEL: @outer7(
 ; CHECK-NOT: call i32 @inner7
   %A = alloca i32
-  %C = call i32 @inner7(i1 true, i32* %A, i32* %ptr)
+  %C = call i32 @inner7(i1 true, ptr %A, ptr %ptr)
   ret i32 %C
 }
 
-define i32 @inner7(i1 %cond, i32* %p1, i32* %p2) {
-  %select = select i1 %cond, i32* %p1, i32* %p2    ; Simplifies to %A
-  %load = load i32, i32* %select                   ; SROA'ed
+define i32 @inner7(i1 %cond, ptr %p1, ptr %p2) {
+  %select = select i1 %cond, ptr %p1, ptr %p2    ; Simplifies to %A
+  %load = load i32, ptr %select                   ; SROA'ed
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i32 %load                                    ; Simplified
 }
 
 
-define i32 @outer8(i32* %ptr) {
+define i32 @outer8(ptr %ptr) {
 ; CHECK-LABEL: @outer8(
 ; CHECK-NOT: call i32 @inner8
   %A = alloca i32
-  %C = call i32 @inner8(i1 false, i32* %ptr, i32* %A)
+  %C = call i32 @inner8(i1 false, ptr %ptr, ptr %A)
   ret i32 %C
 }
 
-define i32 @inner8(i1 %cond, i32* %p1, i32* %p2) {
-  %select = select i1 %cond, i32* %p1, i32* %p2    ; Simplifies to %A
-  %load = load i32, i32* %select                   ; SROA'ed
+define i32 @inner8(i1 %cond, ptr %p1, ptr %p2) {
+  %select = select i1 %cond, ptr %p1, ptr %p2    ; Simplifies to %A
+  %load = load i32, ptr %select                   ; SROA'ed
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i32 %load                                    ; Simplified
 }
 
@@ -153,7 +153,7 @@ define <2 x i32> @outer9(<2 x i32> %val) {
 define <2 x i32> @inner9(<2 x i1> %cond, <2 x i32> %val) {
   %select = select <2 x i1> %cond, <2 x i32> <i32 1, i32 1>, <2 x i32> %val              ; Simplifies to <1, 1>
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret <2 x i32> %select                                                                  ; Simplifies to ret <2 x i32> <1, 1>
 }
 
@@ -168,7 +168,7 @@ define <2 x i32> @outer10(<2 x i32> %val) {
 define <2 x i32> @inner10(<2 x i1> %cond, <2 x i32> %val) {
   %select = select <2 x i1> %cond, < 2 x i32> %val, <2 x i32> <i32 -1, i32 -1>           ; Simplifies to <-1, -1>
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret <2 x i32> %select                                                                  ; Simplifies to ret <2 x i32> <-1, -1>
 }
 
@@ -187,18 +187,18 @@ define <2 x i32> @inner11(<2 x i1> %cond) {
 }
 
 
-define i1 @outer12(i32* %ptr) {
+define i1 @outer12(ptr %ptr) {
 ; CHECK-LABEL: @outer12(
 ; CHECK-NOT: call i1 @inner12
-  %C = call i1 @inner12(i1 true, i32* @glbl, i32* %ptr)
+  %C = call i1 @inner12(i1 true, ptr @glbl, ptr %ptr)
   ret i1 %C
 }
 
-define i1 @inner12(i1 %cond, i32* %ptr1, i32* %ptr2) {
-  %select = select i1 %cond, i32* %ptr1, i32* %ptr2 ; Simplified to @glbl
-  %cmp = icmp eq i32* %select, @glbl                ; Simplified to true
+define i1 @inner12(i1 %cond, ptr %ptr1, ptr %ptr2) {
+  %select = select i1 %cond, ptr %ptr1, ptr %ptr2 ; Simplified to @glbl
+  %cmp = icmp eq ptr %select, @glbl                ; Simplified to true
   call void @pad()
-  store i32 0, i32* @glbl
+  store i32 0, ptr @glbl
   ret i1 %cmp                                       ; Simplifies to ret i1 true
 }
 
@@ -213,8 +213,8 @@ define <2 x i32> @outer13(<2 x i32> %val1, <2 x i32> %val2) {
 define <2 x i32> @inner13(<2 x i1> %cond, <2 x i32> %val1, < 2 x i32> %val2) {
   %select = select <2 x i1> %cond, <2 x i32> %val1, < 2 x i32> %val2 ; Cannot be Simplified
   call void @pad()
-  store i32 0, i32* @glbl
-  store i32 1, i32* @glbl
+  store i32 0, ptr @glbl
+  store i32 1, ptr @glbl
   ret <2 x i32> %select                                              ; Simplified
 }
 
@@ -229,8 +229,8 @@ define i32 @outer14(i32 %val1, i32 %val2) {
 define i32 @inner14(i1 %cond, i32 %val1, i32 %val2) {
   %select = select i1 %cond, i32 %val1, i32 %val2   ; Simplified to %val1
   call void @pad()
-  store i32 0, i32* @glbl
-  store i32 1, i32* @glbl
+  store i32 0, ptr @glbl
+  store i32 1, ptr @glbl
   ret i32 %select                                   ; Simplifies to ret i32 %val1
 }
 
@@ -245,7 +245,7 @@ define i32 @outer15(i32 %val1, i32 %val2) {
 define i32 @inner15(i1 %cond, i32 %val1, i32 %val2) {
   %select = select i1 %cond, i32 %val1, i32 %val2   ; Simplified to %val2
   call void @pad()
-  store i32 0, i32* @glbl
-  store i32 1, i32* @glbl
+  store i32 0, ptr @glbl
+  store i32 1, ptr @glbl
   ret i32 %select                                   ; Simplifies to ret i32 %val2
 }

diff  --git a/llvm/test/Transforms/Inline/AArch64/switch.ll b/llvm/test/Transforms/Inline/AArch64/switch.ll
index 5012e14093328..682f5ecc0bc08 100644
--- a/llvm/test/Transforms/Inline/AArch64/switch.ll
+++ b/llvm/test/Transforms/Inline/AArch64/switch.ll
@@ -1,7 +1,7 @@
 ; RUN: opt < %s -passes=inline -inline-threshold=20 -S -mtriple=aarch64-none-linux  | FileCheck %s
 ; RUN: opt < %s -passes='cgscc(inline)' -inline-threshold=20 -S -mtriple=aarch64-none-linux | FileCheck %s
 
-define i32 @callee_range(i32 %a, i32* %P) {
+define i32 @callee_range(i32 %a, ptr %P) {
   switch i32 %a, label %sw.default [
     i32 0, label %sw.bb0
     i32 1000, label %sw.bb1
@@ -16,26 +16,26 @@ define i32 @callee_range(i32 %a, i32* %P) {
   ]
 
 sw.default:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 sw.bb0:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 sw.bb1:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 return:
   ret i32 42
 }
 
-define i32 @caller_range(i32 %a, i32* %P) {
+define i32 @caller_range(i32 %a, ptr %P) {
 ; CHECK-LABEL: @caller_range(
 ; CHECK: call i32 @callee_range
-  %r = call i32 @callee_range(i32 %a, i32* %P)
+  %r = call i32 @callee_range(i32 %a, ptr %P)
   ret i32 %r
 }
 
-define i32 @callee_bittest(i32 %a, i32* %P) {
+define i32 @callee_bittest(i32 %a, ptr %P) {
   switch i32 %a, label %sw.default [
     i32 0, label %sw.bb0
     i32 1, label %sw.bb1
@@ -49,15 +49,15 @@ define i32 @callee_bittest(i32 %a, i32* %P) {
   ]
 
 sw.default:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 sw.bb0:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 sw.bb1:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 sw.bb2:
@@ -68,14 +68,14 @@ return:
 }
 
 
-define i32 @caller_bittest(i32 %a, i32* %P) {
+define i32 @caller_bittest(i32 %a, ptr %P) {
 ; CHECK-LABEL: @caller_bittest(
 ; CHECK-NOT: call i32 @callee_bittest
-  %r= call i32 @callee_bittest(i32 %a, i32* %P)
+  %r= call i32 @callee_bittest(i32 %a, ptr %P)
   ret i32 %r
 }
 
-define i32 @callee_jumptable(i32 %a, i32* %P) {
+define i32 @callee_jumptable(i32 %a, ptr %P) {
   switch i32 %a, label %sw.default [
     i32 1001, label %sw.bb101
     i32 1002, label %sw.bb102
@@ -95,29 +95,29 @@ sw.default:
   br label %return
 
 sw.bb101:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 sw.bb102:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 sw.bb103:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 sw.bb104:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 return:
   ret i32 42
 }
 
-define i32 @caller_jumptable(i32 %a, i32 %b, i32* %P) {
+define i32 @caller_jumptable(i32 %a, i32 %b, ptr %P) {
 ; CHECK-LABEL: @caller_jumptable(
 ; CHECK: call i32 @callee_jumptable
-  %r = call i32 @callee_jumptable(i32 %b, i32* %P)
+  %r = call i32 @callee_jumptable(i32 %b, ptr %P)
   ret i32 %r
 }
 

diff  --git a/llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument-cost.ll b/llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument-cost.ll
index 2cbde8d5c1b7a..a11861c11342b 100644
--- a/llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument-cost.ll
+++ b/llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument-cost.ll
@@ -9,14 +9,13 @@ target datalayout = "A5"
 ; CHECK: NumAllocaArgs: 1
 ; CHECK: Threshold: 66000
 
-define void @use_private_ptr_arg(float addrspace(5)* nocapture %p) {
+define void @use_private_ptr_arg(ptr addrspace(5) nocapture %p) {
   ret void
 }
 
-define amdgpu_kernel void @test_inliner_pvt_ptr(float addrspace(1)* nocapture %a, i32 %n) {
+define amdgpu_kernel void @test_inliner_pvt_ptr(ptr addrspace(1) nocapture %a, i32 %n) {
 entry:
   %pvt_arr = alloca [64 x float], align 4, addrspace(5)
-  %to.ptr = getelementptr inbounds [64 x float], [64 x float] addrspace(5)* %pvt_arr, i32 0, i32 0
-  call void @use_private_ptr_arg(float addrspace(5)* %to.ptr)
+  call void @use_private_ptr_arg(ptr addrspace(5) %pvt_arr)
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll b/llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll
index 912ae73dd6172..cd940ee5b3700 100644
--- a/llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll
+++ b/llvm/test/Transforms/Inline/AMDGPU/amdgpu-inline-alloca-argument.ll
@@ -3,9 +3,9 @@
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
-define void @use_flat_ptr_arg(float* nocapture %p) {
+define void @use_flat_ptr_arg(ptr nocapture %p) {
 entry:
-  %tmp1 = load float, float* %p, align 4
+  %tmp1 = load float, ptr %p, align 4
   %div = fdiv float 1.000000e+00, %tmp1
   %add0 = fadd float %div, 1.0
   %add1 = fadd float %add0, 1.0
@@ -18,13 +18,13 @@ entry:
   %add8 = fadd float %add7, 1.0
   %add9 = fadd float %add8, 1.0
   %add10 = fadd float %add9, 1.0
-  store float %add10, float* %p, align 4
+  store float %add10, ptr %p, align 4
   ret void
 }
 
-define void @use_private_ptr_arg(float addrspace(5)* nocapture %p) {
+define void @use_private_ptr_arg(ptr addrspace(5) nocapture %p) {
 entry:
-  %tmp1 = load float, float addrspace(5)* %p, align 4
+  %tmp1 = load float, ptr addrspace(5) %p, align 4
   %div = fdiv float 1.000000e+00, %tmp1
   %add0 = fadd float %div, 1.0
   %add1 = fadd float %add0, 1.0
@@ -37,7 +37,7 @@ entry:
   %add8 = fadd float %add7, 1.0
   %add9 = fadd float %add8, 1.0
   %add10 = fadd float %add9, 1.0
-  store float %add10, float addrspace(5)* %p, align 4
+  store float %add10, ptr addrspace(5) %p, align 4
   ret void
 }
 
@@ -46,21 +46,21 @@ entry:
 ; CHECK-LABEL: @test_inliner_flat_ptr(
 ; CHECK: call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NOT: call {{[.*]}}@
-define amdgpu_kernel void @test_inliner_flat_ptr(float addrspace(1)* nocapture %a, i32 %n) {
+define amdgpu_kernel void @test_inliner_flat_ptr(ptr addrspace(1) nocapture %a, i32 %n) {
 entry:
   %pvt_arr = alloca [64 x float], align 4, addrspace(5)
   %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
-  %arrayidx = getelementptr inbounds float, float addrspace(1)* %a, i32 %tid
-  %tmp2 = load float, float addrspace(1)* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds float, ptr addrspace(1) %a, i32 %tid
+  %tmp2 = load float, ptr addrspace(1) %arrayidx, align 4
   %add = add i32 %tid, 1
-  %arrayidx2 = getelementptr inbounds float, float addrspace(1)* %a, i32 %add
-  %tmp5 = load float, float addrspace(1)* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr addrspace(1) %a, i32 %add
+  %tmp5 = load float, ptr addrspace(1) %arrayidx2, align 4
   %or = or i32 %tid, %n
-  %arrayidx5 = getelementptr inbounds [64 x float], [64 x float] addrspace(5)* %pvt_arr, i32 0, i32 %or
-  %arrayidx7 = getelementptr inbounds [64 x float], [64 x float] addrspace(5)* %pvt_arr, i32 0, i32 %or
-  %to.flat = addrspacecast float addrspace(5)* %arrayidx7 to float*
-  call void @use_private_ptr_arg(float addrspace(5)* %arrayidx7)
-  call void @use_flat_ptr_arg(float* %to.flat)
+  %arrayidx5 = getelementptr inbounds [64 x float], ptr addrspace(5) %pvt_arr, i32 0, i32 %or
+  %arrayidx7 = getelementptr inbounds [64 x float], ptr addrspace(5) %pvt_arr, i32 0, i32 %or
+  %to.flat = addrspacecast ptr addrspace(5) %arrayidx7 to ptr
+  call void @use_private_ptr_arg(ptr addrspace(5) %arrayidx7)
+  call void @use_flat_ptr_arg(ptr %to.flat)
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/AMDGPU/inline-amdgpu-vecbonus.ll b/llvm/test/Transforms/Inline/AMDGPU/inline-amdgpu-vecbonus.ll
index d9f1adb61dff1..76af847da753a 100644
--- a/llvm/test/Transforms/Inline/AMDGPU/inline-amdgpu-vecbonus.ll
+++ b/llvm/test/Transforms/Inline/AMDGPU/inline-amdgpu-vecbonus.ll
@@ -22,11 +22,11 @@ entry:
 ; CHECK-NOT: udiv
 ; CHECK: tail call <16 x i32> @div_vecbonus
 ; CHECK: ret void
-define amdgpu_kernel void @caller_vecbonus(<16 x i32> addrspace(1)* nocapture %x, <16 x i32> addrspace(1)* nocapture readonly %y) {
+define amdgpu_kernel void @caller_vecbonus(ptr addrspace(1) nocapture %x, ptr addrspace(1) nocapture readonly %y) {
 entry:
-  %tmp = load <16 x i32>, <16 x i32> addrspace(1)* %x
-  %tmp1 = load <16 x i32>, <16 x i32> addrspace(1)* %y
+  %tmp = load <16 x i32>, ptr addrspace(1) %x
+  %tmp1 = load <16 x i32>, ptr addrspace(1) %y
   %div.i = tail call <16 x i32> @div_vecbonus(<16 x i32> %tmp, <16 x i32> %tmp1)
-  store <16 x i32> %div.i, <16 x i32> addrspace(1)* %x
+  store <16 x i32> %div.i, ptr addrspace(1) %x
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/AMDGPU/inline-hint.ll b/llvm/test/Transforms/Inline/AMDGPU/inline-hint.ll
index 2531dd2f4d4b2..06172756ecff2 100644
--- a/llvm/test/Transforms/Inline/AMDGPU/inline-hint.ll
+++ b/llvm/test/Transforms/Inline/AMDGPU/inline-hint.ll
@@ -29,12 +29,12 @@ entry:
 ; CHECK-NOT: call
 ; CHECK: udiv
 ; CHECK: ret void
-define amdgpu_kernel void @caller_hint(<16 x i32> addrspace(1)* nocapture %x, <16 x i32> addrspace(1)* nocapture readonly %y) {
+define amdgpu_kernel void @caller_hint(ptr addrspace(1) nocapture %x, ptr addrspace(1) nocapture readonly %y) {
 entry:
-  %tmp = load <16 x i32>, <16 x i32> addrspace(1)* %x, align 4
-  %tmp1 = load <16 x i32>, <16 x i32> addrspace(1)* %y, align 4
+  %tmp = load <16 x i32>, ptr addrspace(1) %x, align 4
+  %tmp1 = load <16 x i32>, ptr addrspace(1) %y, align 4
   %div.i = tail call <16 x i32> @div_hint(<16 x i32> %tmp, <16 x i32> %tmp1) #0
-  store <16 x i32> %div.i, <16 x i32> addrspace(1)* %x, align 4
+  store <16 x i32> %div.i, ptr addrspace(1) %x, align 4
   ret void
 }
 
@@ -66,12 +66,12 @@ entry:
 ; CHECK-NOT: udiv
 ; CHECK: tail call <16 x i32> @div_nohint
 ; CHECK: ret void
-define amdgpu_kernel void @caller_nohint(<16 x i32> addrspace(1)* nocapture %x, <16 x i32> addrspace(1)* nocapture readonly %y) {
+define amdgpu_kernel void @caller_nohint(ptr addrspace(1) nocapture %x, ptr addrspace(1) nocapture readonly %y) {
 entry:
-  %tmp = load <16 x i32>, <16 x i32> addrspace(1)* %x
-  %tmp1 = load <16 x i32>, <16 x i32> addrspace(1)* %y
+  %tmp = load <16 x i32>, ptr addrspace(1) %x
+  %tmp1 = load <16 x i32>, ptr addrspace(1) %y
   %div.i = tail call <16 x i32> @div_nohint(<16 x i32> %tmp, <16 x i32> %tmp1)
-  store <16 x i32> %div.i, <16 x i32> addrspace(1)* %x
+  store <16 x i32> %div.i, ptr addrspace(1) %x
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/ARM/loop-add.ll b/llvm/test/Transforms/Inline/ARM/loop-add.ll
index de89d1468e3b3..d1d35320a0bd7 100644
--- a/llvm/test/Transforms/Inline/ARM/loop-add.ll
+++ b/llvm/test/Transforms/Inline/ARM/loop-add.ll
@@ -4,48 +4,48 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 target triple = "thumbv7m-arm-none-eabi"
 
 ; CHECK-LABEL: void @doCalls
-define void @doCalls(i8* nocapture %p1, i8* nocapture %p2, i32 %n) #0 {
+define void @doCalls(ptr nocapture %p1, ptr nocapture %p2, i32 %n) #0 {
 entry:
   %div = lshr i32 %n, 1
 ; CHECK: call void @LoopCall
-  tail call void @LoopCall(i8* %p1, i8* %p2, i32 %div) #0
+  tail call void @LoopCall(ptr %p1, ptr %p2, i32 %div) #0
 
   %div2 = lshr i32 %n, 2
 ; CHECK: call void @LoopCall
-  tail call void @LoopCall(i8* %p1, i8* %p2, i32 %div2) #0
+  tail call void @LoopCall(ptr %p1, ptr %p2, i32 %div2) #0
 
 ; CHECK-NOT: call void @LoopCall
-  tail call void @LoopCall(i8* %p2, i8* %p1, i32 0) #0
+  tail call void @LoopCall(ptr %p2, ptr %p1, i32 0) #0
 
 ; CHECK-NOT: call void @LoopCall_internal
-  tail call void @LoopCall_internal(i8* %p1, i8* %p2, i32 %div2) #0
+  tail call void @LoopCall_internal(ptr %p1, ptr %p2, i32 %div2) #0
 
   %div3 = lshr i32 %n, 4
 ; CHECK-NOT: call void @SimpleCall
-  tail call void @SimpleCall(i8* %p2, i8* %p1, i32 %div3) #0
+  tail call void @SimpleCall(ptr %p2, ptr %p1, i32 %div3) #0
   ret void
 }
 
 ; CHECK-LABEL: define void @LoopCall
-define void @LoopCall(i8* nocapture %dest, i8* nocapture readonly %source, i32 %num) #0 {
+define void @LoopCall(ptr nocapture %dest, ptr nocapture readonly %source, i32 %num) #0 {
 entry:
   %c = icmp ne i32 %num, 0
   br i1 %c, label %while.cond, label %while.end
 
 while.cond:                                       ; preds = %while.body, %entry
   %num.addr.0 = phi i32 [ %num, %entry ], [ %dec, %while.body ]
-  %p_dest.0 = phi i8* [ %dest, %entry ], [ %incdec.ptr2, %while.body ]
-  %p_source.0 = phi i8* [ %source, %entry ], [ %incdec.ptr, %while.body ]
+  %p_dest.0 = phi ptr [ %dest, %entry ], [ %incdec.ptr2, %while.body ]
+  %p_source.0 = phi ptr [ %source, %entry ], [ %incdec.ptr, %while.body ]
   %cmp = icmp eq i32 %num.addr.0, 0
   br i1 %cmp, label %while.end, label %while.body
 
 while.body:                                       ; preds = %while.cond
-  %incdec.ptr = getelementptr inbounds i8, i8* %p_source.0, i32 1
-  %0 = load i8, i8* %p_source.0, align 1
+  %incdec.ptr = getelementptr inbounds i8, ptr %p_source.0, i32 1
+  %0 = load i8, ptr %p_source.0, align 1
   %1 = trunc i32 %num.addr.0 to i8
   %conv1 = add i8 %0, %1
-  %incdec.ptr2 = getelementptr inbounds i8, i8* %p_dest.0, i32 1
-  store i8 %conv1, i8* %p_dest.0, align 1
+  %incdec.ptr2 = getelementptr inbounds i8, ptr %p_dest.0, i32 1
+  store i8 %conv1, ptr %p_dest.0, align 1
   %dec = add i32 %num.addr.0, -1
   br label %while.cond
 
@@ -54,25 +54,25 @@ while.end:                                        ; preds = %while.cond
 }
 
 ; CHECK-LABEL-NOT: define void @LoopCall_internal
-define internal void @LoopCall_internal(i8* nocapture %dest, i8* nocapture readonly %source, i32 %num) #0 {
+define internal void @LoopCall_internal(ptr nocapture %dest, ptr nocapture readonly %source, i32 %num) #0 {
 entry:
   %c = icmp ne i32 %num, 0
   br i1 %c, label %while.cond, label %while.end
 
 while.cond:                                       ; preds = %while.body, %entry
   %num.addr.0 = phi i32 [ %num, %entry ], [ %dec, %while.body ]
-  %p_dest.0 = phi i8* [ %dest, %entry ], [ %incdec.ptr2, %while.body ]
-  %p_source.0 = phi i8* [ %source, %entry ], [ %incdec.ptr, %while.body ]
+  %p_dest.0 = phi ptr [ %dest, %entry ], [ %incdec.ptr2, %while.body ]
+  %p_source.0 = phi ptr [ %source, %entry ], [ %incdec.ptr, %while.body ]
   %cmp = icmp eq i32 %num.addr.0, 0
   br i1 %cmp, label %while.end, label %while.body
 
 while.body:                                       ; preds = %while.cond
-  %incdec.ptr = getelementptr inbounds i8, i8* %p_source.0, i32 1
-  %0 = load i8, i8* %p_source.0, align 1
+  %incdec.ptr = getelementptr inbounds i8, ptr %p_source.0, i32 1
+  %0 = load i8, ptr %p_source.0, align 1
   %1 = trunc i32 %num.addr.0 to i8
   %conv1 = add i8 %0, %1
-  %incdec.ptr2 = getelementptr inbounds i8, i8* %p_dest.0, i32 1
-  store i8 %conv1, i8* %p_dest.0, align 1
+  %incdec.ptr2 = getelementptr inbounds i8, ptr %p_dest.0, i32 1
+  store i8 %conv1, ptr %p_dest.0, align 1
   %dec = add i32 %num.addr.0, -1
   br label %while.cond
 
@@ -81,13 +81,13 @@ while.end:                                        ; preds = %while.cond
 }
 
 ; CHECK-LABEL: define void @SimpleCall
-define void @SimpleCall(i8* nocapture %dest, i8* nocapture readonly %source, i32 %num) #0 {
+define void @SimpleCall(ptr nocapture %dest, ptr nocapture readonly %source, i32 %num) #0 {
 entry:
-  %arrayidx = getelementptr inbounds i8, i8* %source, i32 %num
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %source, i32 %num
+  %0 = load i8, ptr %arrayidx, align 1
   %1 = xor i8 %0, 127
-  %arrayidx2 = getelementptr inbounds i8, i8* %dest, i32 %num
-  store i8 %1, i8* %arrayidx2, align 1
+  %arrayidx2 = getelementptr inbounds i8, ptr %dest, i32 %num
+  store i8 %1, ptr %arrayidx2, align 1
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/ARM/loop-memcpy.ll b/llvm/test/Transforms/Inline/ARM/loop-memcpy.ll
index aea97fd74ca0a..a3b2686008669 100644
--- a/llvm/test/Transforms/Inline/ARM/loop-memcpy.ll
+++ b/llvm/test/Transforms/Inline/ARM/loop-memcpy.ll
@@ -4,10 +4,10 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 target triple = "thumbv7m-arm-none-eabi"
 
 ; CHECK-LABEL: define void @matcpy
-define void @matcpy(i8* %dest, i8* %source, i32 %num) #0 {
+define void @matcpy(ptr %dest, ptr %source, i32 %num) #0 {
 entry:
-  %0 = ptrtoint i8* %dest to i32
-  %1 = ptrtoint i8* %source to i32
+  %0 = ptrtoint ptr %dest to i32
+  %1 = ptrtoint ptr %source to i32
   %2 = xor i32 %0, %1
   %3 = and i32 %2, 3
   %cmp = icmp eq i32 %3, 0
@@ -29,30 +29,30 @@ if.then4:                                         ; preds = %if.then
 
 if.then8:                                         ; preds = %if.then4
 ; CHECK: call fastcc void @memcpy
-  call fastcc void @memcpy(i8* %dest, i8* %source, i32 %and2) #0
-  %add.ptr = getelementptr inbounds i8, i8* %dest, i32 %and2
-  %add.ptr9 = getelementptr inbounds i8, i8* %source, i32 %and2
+  call fastcc void @memcpy(ptr %dest, ptr %source, i32 %and2) #0
+  %add.ptr = getelementptr inbounds i8, ptr %dest, i32 %and2
+  %add.ptr9 = getelementptr inbounds i8, ptr %source, i32 %and2
   br label %if.end
 
 if.end:                                           ; preds = %if.then4, %if.then8
-  %p_dest.0 = phi i8* [ %add.ptr, %if.then8 ], [ %dest, %if.then4 ]
-  %p_source.0 = phi i8* [ %add.ptr9, %if.then8 ], [ %source, %if.then4 ]
+  %p_dest.0 = phi ptr [ %add.ptr, %if.then8 ], [ %dest, %if.then4 ]
+  %p_source.0 = phi ptr [ %add.ptr9, %if.then8 ], [ %source, %if.then4 ]
   %tobool14 = icmp eq i32 %sub7, 0
   br i1 %tobool14, label %if.end22, label %if.then15
 
 if.then15:                                        ; preds = %if.end
-  %add.ptr13 = getelementptr inbounds i8, i8* %p_source.0, i32 %shr
-  %add.ptr11 = getelementptr inbounds i8, i8* %p_dest.0, i32 %shr
+  %add.ptr13 = getelementptr inbounds i8, ptr %p_source.0, i32 %shr
+  %add.ptr11 = getelementptr inbounds i8, ptr %p_dest.0, i32 %shr
 ; CHECK: call fastcc void @memcpy
-  call fastcc void @memcpy(i8* %add.ptr11, i8* %add.ptr13, i32 %sub7) #0
+  call fastcc void @memcpy(ptr %add.ptr11, ptr %add.ptr13, i32 %sub7) #0
   br label %if.end22
 
 if.else:                                          ; preds = %if.then
-  call fastcc void @memcpy(i8* %dest, i8* %source, i32 %num) #0
+  call fastcc void @memcpy(ptr %dest, ptr %source, i32 %num) #0
   br label %if.end22
 
 if.else20:                                        ; preds = %entry
-  call fastcc void @memcpy(i8* %dest, i8* %source, i32 %num) #0
+  call fastcc void @memcpy(ptr %dest, ptr %source, i32 %num) #0
   br label %if.end22
 
 if.end22:                                         ; preds = %if.then15, %if.end, %if.else, %if.else20
@@ -60,22 +60,22 @@ if.end22:                                         ; preds = %if.then15, %if.end,
 }
 
 ; CHECK-LABEL: define internal void @memcpy
-define internal void @memcpy(i8* nocapture %dest, i8* nocapture readonly %source, i32 %num) #0 {
+define internal void @memcpy(ptr nocapture %dest, ptr nocapture readonly %source, i32 %num) #0 {
 entry:
   br label %while.cond
 
 while.cond:                                       ; preds = %while.body, %entry
   %num.addr.0 = phi i32 [ %num, %entry ], [ %dec, %while.body ]
-  %p_dest.0 = phi i8* [ %dest, %entry ], [ %incdec.ptr1, %while.body ]
-  %p_source.0 = phi i8* [ %source, %entry ], [ %incdec.ptr, %while.body ]
+  %p_dest.0 = phi ptr [ %dest, %entry ], [ %incdec.ptr1, %while.body ]
+  %p_source.0 = phi ptr [ %source, %entry ], [ %incdec.ptr, %while.body ]
   %cmp = icmp eq i32 %num.addr.0, 0
   br i1 %cmp, label %while.end, label %while.body
 
 while.body:                                       ; preds = %while.cond
-  %incdec.ptr = getelementptr inbounds i8, i8* %p_source.0, i32 1
-  %0 = load i8, i8* %p_source.0, align 1
-  %incdec.ptr1 = getelementptr inbounds i8, i8* %p_dest.0, i32 1
-  store i8 %0, i8* %p_dest.0, align 1
+  %incdec.ptr = getelementptr inbounds i8, ptr %p_source.0, i32 1
+  %0 = load i8, ptr %p_source.0, align 1
+  %incdec.ptr1 = getelementptr inbounds i8, ptr %p_dest.0, i32 1
+  store i8 %0, ptr %p_dest.0, align 1
   %dec = add i32 %num.addr.0, -1
   br label %while.cond
 

diff  --git a/llvm/test/Transforms/Inline/ARM/loop-noinline.ll b/llvm/test/Transforms/Inline/ARM/loop-noinline.ll
index 72e2b3f42ea2b..e27336e5276f0 100644
--- a/llvm/test/Transforms/Inline/ARM/loop-noinline.ll
+++ b/llvm/test/Transforms/Inline/ARM/loop-noinline.ll
@@ -6,43 +6,43 @@ target triple = "thumbv7m-arm-none-eabi"
 ; Check we don't inline loops at -Oz. They tend to be larger than we
 ; expect.
 
-; CHECK: define i8* @H
+; CHECK: define ptr @H
 @digits = constant [16 x i8] c"0123456789ABCDEF", align 1
-define i8* @H(i8* %p, i32 %val, i32 %num) #0 {
+define ptr @H(ptr %p, i32 %val, i32 %num) #0 {
 entry:
   br label %do.body
 
 do.body:                                          ; preds = %do.body, %entry
-  %p.addr.0 = phi i8* [ %p, %entry ], [ %incdec.ptr, %do.body ]
+  %p.addr.0 = phi ptr [ %p, %entry ], [ %incdec.ptr, %do.body ]
   %val.addr.0 = phi i32 [ %val, %entry ], [ %shl, %do.body ]
   %num.addr.0 = phi i32 [ %num, %entry ], [ %dec, %do.body ]
   %shr = lshr i32 %val.addr.0, 28
-  %arrayidx = getelementptr inbounds [16 x i8], [16 x i8]* @digits, i32 0, i32 %shr
-  %0 = load i8, i8* %arrayidx, align 1
-  %incdec.ptr = getelementptr inbounds i8, i8* %p.addr.0, i32 1
-  store i8 %0, i8* %p.addr.0, align 1
+  %arrayidx = getelementptr inbounds [16 x i8], ptr @digits, i32 0, i32 %shr
+  %0 = load i8, ptr %arrayidx, align 1
+  %incdec.ptr = getelementptr inbounds i8, ptr %p.addr.0, i32 1
+  store i8 %0, ptr %p.addr.0, align 1
   %shl = shl i32 %val.addr.0, 4
   %dec = add i32 %num.addr.0, -1
   %tobool = icmp eq i32 %dec, 0
   br i1 %tobool, label %do.end, label %do.body
 
 do.end:                                           ; preds = %do.body
-  %scevgep = getelementptr i8, i8* %p, i32 %num
-  ret i8* %scevgep
+  %scevgep = getelementptr i8, ptr %p, i32 %num
+  ret ptr %scevgep
 }
 
-define nonnull i8* @call1(i8* %p, i32 %val, i32 %num) #0 {
+define nonnull ptr @call1(ptr %p, i32 %val, i32 %num) #0 {
 entry:
-; CHECK: tail call i8* @H
-  %call = tail call i8* @H(i8* %p, i32 %val, i32 %num) #0
-  ret i8* %call
+; CHECK: tail call ptr @H
+  %call = tail call ptr @H(ptr %p, i32 %val, i32 %num) #0
+  ret ptr %call
 }
 
-define nonnull i8* @call2(i8* %p, i32 %val) #0 {
+define nonnull ptr @call2(ptr %p, i32 %val) #0 {
 entry:
-; CHECK: tail call i8* @H
-  %call = tail call i8* @H(i8* %p, i32 %val, i32 32) #0
-  ret i8* %call
+; CHECK: tail call ptr @H
+  %call = tail call ptr @H(ptr %p, i32 %val, i32 32) #0
+  ret ptr %call
 }
 
 attributes #0 = { minsize optsize }

diff  --git a/llvm/test/Transforms/Inline/ML/Inputs/test-module.ll b/llvm/test/Transforms/Inline/ML/Inputs/test-module.ll
index d01f4bb301a86..005731fc18429 100644
--- a/llvm/test/Transforms/Inline/ML/Inputs/test-module.ll
+++ b/llvm/test/Transforms/Inline/ML/Inputs/test-module.ll
@@ -13,10 +13,10 @@ define dso_local i32 @top() {
 
 define internal dso_local i32 @adder(i32) {
   %2 = alloca i32, align 4
-  store i32 %0, i32* %2, align 4
-  %3 = load i32, i32* %2, align 4
+  store i32 %0, ptr %2, align 4
+  %3 = load i32, ptr %2, align 4
   %4 = call i32 @multiplier(i32 %3)
-  %5 = load i32, i32* %2, align 4
+  %5 = load i32, ptr %2, align 4
   %6 = call i32 @switcher(i32 1)
   %7 = add nsw i32 %4, %6
   ret i32 %7
@@ -24,9 +24,9 @@ define internal dso_local i32 @adder(i32) {
 
 define internal i32 @multiplier(i32) {
   %2 = alloca i32, align 4
-  store i32 %0, i32* %2, align 4
-  %3 = load i32, i32* %2, align 4
-  %4 = load i32, i32* %2, align 4
+  store i32 %0, ptr %2, align 4
+  %3 = load i32, ptr %2, align 4
+  %4 = load i32, ptr %2, align 4
   %5 = mul nsw i32 %3, %4
   ret i32 %5
 }
@@ -34,32 +34,32 @@ define internal i32 @multiplier(i32) {
 define i32 @switcher(i32) {
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
-  store i32 %0, i32* %3, align 4
-  %4 = load i32, i32* %3, align 4
+  store i32 %0, ptr %3, align 4
+  %4 = load i32, ptr %3, align 4
   switch i32 %4, label %11 [
     i32 1, label %5
     i32 2, label %6
   ]
 
 ; <label>:5:                                      ; preds = %1
-  store i32 2, i32* %2, align 4
+  store i32 2, ptr %2, align 4
   br label %12
 
 ; <label>:6:                                      ; preds = %1
-  %7 = load i32, i32* %3, align 4
-  %8 = load i32, i32* %3, align 4
+  %7 = load i32, ptr %3, align 4
+  %8 = load i32, ptr %3, align 4
   %9 = call i32 @multiplier(i32 %8)
   %10 = add nsw i32 %7, %9
-  store i32 %10, i32* %2, align 4
+  store i32 %10, ptr %2, align 4
   br label %12
 
 ; <label>:11:                                     ; preds = %1
   %adder.result = call i32 @adder(i32 2)
-  store i32 %adder.result, i32* %2, align 4
+  store i32 %adder.result, ptr %2, align 4
   br label %12
 
 ; <label>:12:                                     ; preds = %11, %6, %5
-  %13 = load i32, i32* %2, align 4
+  %13 = load i32, ptr %2, align 4
   ret i32 %13
 }
 

diff  --git a/llvm/test/Transforms/Inline/ML/scc-dead-accounting.ll b/llvm/test/Transforms/Inline/ML/scc-dead-accounting.ll
index 3042cfc60d8bc..41c0e4c313811 100644
--- a/llvm/test/Transforms/Inline/ML/scc-dead-accounting.ll
+++ b/llvm/test/Transforms/Inline/ML/scc-dead-accounting.ll
@@ -44,10 +44,10 @@ entry:
 
 for.body:
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+  %0 = load i32, ptr %arrayidx, align 4
   %inc = add nsw i32 %0, 1
-  store i32 %inc, i32* %arrayidx, align 4
+  store i32 %inc, ptr %arrayidx, align 4
   call void @_Z8inlinemei()
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, 10

diff  --git a/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll b/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll
index 73ab9ecf4e492..de0f6d14b0814 100644
--- a/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll
+++ b/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll
@@ -6,54 +6,52 @@
 ;
 ; CHECK: [MLInlineAdvisor] Nodes: 4 Edges: 0
 
-%"struct.std::coroutine_handle" = type { i8* }
+%"struct.std::coroutine_handle" = type { ptr }
 %"struct.std::coroutine_handle.0" = type { %"struct.std::coroutine_handle" }
 %"struct.lean_future<int>::Awaiter" = type { i32, %"struct.std::coroutine_handle.0" }
 
-declare i8* @malloc(i64)
+declare ptr @malloc(i64)
 declare void @print(i32)
 
 define void @a() presplitcoroutine {
 entry:
   %ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
   %testval = alloca i32
-  %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
-  %alloc = call i8* @malloc(i64 16) #3
-  %vFrame = call noalias nonnull i8* @llvm.coro.begin(token %id, i8* %alloc)
+  %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
+  %alloc = call ptr @malloc(i64 16) #3
+  %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
 
-  %save = call token @llvm.coro.save(i8* null)
-  %Result.i19 = getelementptr inbounds %"struct.lean_future<int>::Awaiter", %"struct.lean_future<int>::Awaiter"* %ref.tmp7, i64 0, i32 0
+  %save = call token @llvm.coro.save(ptr null)
   %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
   switch i8 %suspend, label %exit [
     i8 0, label %await.ready
     i8 1, label %exit
   ]
 await.ready:
-  %StrayCoroSave = call token @llvm.coro.save(i8* null)
-  %val = load i32, i32* %Result.i19
-  %cast = bitcast i32* %testval to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %cast)
-  %test = load i32, i32* %testval
+  %StrayCoroSave = call token @llvm.coro.save(ptr null)
+  %val = load i32, ptr %ref.tmp7
+  call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+  %test = load i32, ptr %testval
   call void @print(i32 %test)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8*  %cast)
+  call void @llvm.lifetime.end.p0(i64 4, ptr  %testval)
   call void @print(i32 %val)
   br label %exit
 exit:
-  call i1 @llvm.coro.end(i8* null, i1 false)
+  call i1 @llvm.coro.end(ptr null, i1 false)
   ret void
 }
 
-declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
+declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
 declare i1 @llvm.coro.alloc(token) #3
-declare noalias nonnull i8* @"\01??2@YAPEAX_K@Z"(i64) local_unnamed_addr
+declare noalias nonnull ptr @"\01??2@YAPEAX_K@Z"(i64) local_unnamed_addr
 declare i64 @llvm.coro.size.i64() #5
-declare i8* @llvm.coro.begin(token, i8* writeonly) #3
+declare ptr @llvm.coro.begin(token, ptr writeonly) #3
 declare void @"\01?puts@@YAXZZ"(...)
-declare token @llvm.coro.save(i8*) #3
-declare i8* @llvm.coro.frame() #5
+declare token @llvm.coro.save(ptr) #3
+declare ptr @llvm.coro.frame() #5
 declare i8 @llvm.coro.suspend(token, i1) #3
-declare void @"\01??3@YAXPEAX@Z"(i8*) local_unnamed_addr #10
-declare i8* @llvm.coro.free(token, i8* nocapture readonly) #2
-declare i1 @llvm.coro.end(i8*, i1) #3
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #4
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #4
+declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
+declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
+declare i1 @llvm.coro.end(ptr, i1) #3
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4

diff  --git a/llvm/test/Transforms/Inline/ML/state-tracking-scc-splits.ll b/llvm/test/Transforms/Inline/ML/state-tracking-scc-splits.ll
index 2310c80646198..ef844c44a7acd 100644
--- a/llvm/test/Transforms/Inline/ML/state-tracking-scc-splits.ll
+++ b/llvm/test/Transforms/Inline/ML/state-tracking-scc-splits.ll
@@ -7,7 +7,7 @@
 
 declare void @readnone() nofree nosync readnone
 declare void @unknown()
-declare void @reference_function_pointer(void()*) nofree nosync readnone
+declare void @reference_function_pointer(ptr) nofree nosync readnone
 
 ; The @test1_* set of functions checks that when we mutate functions with
 ; simplifycfg to delete call edges and this ends up splitting both the SCCs
@@ -77,7 +77,7 @@ define void @test2_b1() {
 }
 
 define void @test2_b2() {
-  call void @reference_function_pointer(void()* @test2_a)
+  call void @reference_function_pointer(ptr @test2_a)
   br i1 false, label %dead, label %exit
 
 dead:
@@ -89,7 +89,7 @@ exit:
 }
 
 define void @test2_b3() {
-  call void @reference_function_pointer(void()* @test2_a)
+  call void @reference_function_pointer(ptr @test2_a)
   call void @unknown()
   br i1 false, label %dead, label %exit
 
@@ -102,7 +102,7 @@ exit:
 }
 
 define void @test2_b4() {
-  call void @reference_function_pointer(void()* @test2_a)
+  call void @reference_function_pointer(ptr @test2_a)
   br i1 false, label %dead, label %exit
 
 dead:
@@ -236,7 +236,7 @@ define void @test4_b22() {
 }
 
 define void @test4_b23() {
-  call void @reference_function_pointer(void()* @test4_a)
+  call void @reference_function_pointer(ptr @test4_a)
   br i1 false, label %dead, label %exit
 
 dead:
@@ -258,7 +258,7 @@ define void @test4_b32() {
 }
 
 define void @test4_b33() {
-  call void @reference_function_pointer(void()* @test4_a)
+  call void @reference_function_pointer(ptr @test4_a)
   call void @unknown()
   br i1 false, label %dead, label %exit
 
@@ -281,7 +281,7 @@ define void @test4_b42() {
 }
 
 define void @test4_b43() {
-  call void @reference_function_pointer(void()* @test4_a)
+  call void @reference_function_pointer(ptr @test4_a)
   br i1 false, label %dead, label %exit
 
 dead:

diff  --git a/llvm/test/Transforms/Inline/X86/switch.ll b/llvm/test/Transforms/Inline/X86/switch.ll
index 76a34d40d98a5..99c7b25928b22 100644
--- a/llvm/test/Transforms/Inline/X86/switch.ll
+++ b/llvm/test/Transforms/Inline/X86/switch.ll
@@ -1,7 +1,7 @@
 ; RUN: opt < %s -passes=inline -inline-threshold=1 -S -mtriple=x86_64-unknown-linux-gnu  | FileCheck %s
 ; RUN: opt < %s -passes='cgscc(inline)' -inline-threshold=1 -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
 
-define i32 @callee_range(i32 %a, i32* %P) {
+define i32 @callee_range(i32 %a, ptr %P) {
   switch i32 %a, label %sw.default [
     i32 0, label %sw.bb0
     i32 1000, label %sw.bb1
@@ -16,26 +16,26 @@ define i32 @callee_range(i32 %a, i32* %P) {
   ]
 
 sw.default:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 sw.bb0:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 sw.bb1:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 return:
   ret i32 42
 }
 
-define i32 @caller_range(i32 %a, i32* %P) {
+define i32 @caller_range(i32 %a, ptr %P) {
 ; CHECK-LABEL: @caller_range(
 ; CHECK: call i32 @callee_range
-  %r = call i32 @callee_range(i32 %a, i32* %P)
+  %r = call i32 @callee_range(i32 %a, ptr %P)
   ret i32 %r
 }
 
-define i32 @callee_bittest(i32 %a, i32* %P) {
+define i32 @callee_bittest(i32 %a, ptr %P) {
   switch i32 %a, label %sw.default [
     i32 0, label %sw.bb0
     i32 1, label %sw.bb1
@@ -49,15 +49,15 @@ define i32 @callee_bittest(i32 %a, i32* %P) {
   ]
 
 sw.default:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 sw.bb0:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 sw.bb1:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 sw.bb2:
@@ -68,14 +68,14 @@ return:
 }
 
 
-define i32 @caller_bittest(i32 %a, i32* %P) {
+define i32 @caller_bittest(i32 %a, ptr %P) {
 ; CHECK-LABEL: @caller_bittest(
 ; CHECK-NOT: call i32 @callee_bittest
-  %r= call i32 @callee_bittest(i32 %a, i32* %P)
+  %r= call i32 @callee_bittest(i32 %a, ptr %P)
   ret i32 %r
 }
 
-define i32 @callee_jumptable(i32 %a, i32* %P) {
+define i32 @callee_jumptable(i32 %a, ptr %P) {
   switch i32 %a, label %sw.default [
     i32 1001, label %sw.bb101
     i32 1002, label %sw.bb102
@@ -95,29 +95,29 @@ sw.default:
   br label %return
 
 sw.bb101:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 sw.bb102:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 sw.bb103:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 sw.bb104:
-  store volatile i32 %a, i32* %P
+  store volatile i32 %a, ptr %P
   br label %return
 
 return:
   ret i32 42
 }
 
-define i32 @caller_jumptable(i32 %a, i32 %b, i32* %P) {
+define i32 @caller_jumptable(i32 %a, i32 %b, ptr %P) {
 ; CHECK-LABEL: @caller_jumptable(
 ; CHECK: call i32 @callee_jumptable
-  %r = call i32 @callee_jumptable(i32 %b, i32* %P)
+  %r = call i32 @callee_jumptable(i32 %b, ptr %P)
   ret i32 %r
 }
 

diff  --git a/llvm/test/Transforms/Inline/align.ll b/llvm/test/Transforms/Inline/align.ll
index bd2e9183ac924..a85ba7e49c6b6 100644
--- a/llvm/test/Transforms/Inline/align.ll
+++ b/llvm/test/Transforms/Inline/align.ll
@@ -3,104 +3,104 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-define void @hello(float* align 128 nocapture %a, float* nocapture readonly %c) #0 {
+define void @hello(ptr align 128 nocapture %a, ptr nocapture readonly %c) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@hello
-; CHECK-SAME: (float* nocapture align 128 [[A:%.*]], float* nocapture readonly [[C:%.*]]) #0
+; CHECK-SAME: (ptr nocapture align 128 [[A:%.*]], ptr nocapture readonly [[C:%.*]]) #0
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 5
-  store float %0, float* %arrayidx, align 4
+  %0 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 5
+  store float %0, ptr %arrayidx, align 4
   ret void
 }
 
-define void @foo(float* nocapture %a, float* nocapture readonly %c) #0 {
+define void @foo(ptr nocapture %a, ptr nocapture readonly %c) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@foo
-; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture readonly [[C:%.*]]) #0
+; CHECK-SAME: (ptr nocapture [[A:%.*]], ptr nocapture readonly [[C:%.*]]) #0
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[A]], i64 128) ]
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
-; CHECK-NEXT:    store float [[TMP1]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 128) ]
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, ptr [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX_I]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
+; CHECK-NEXT:    store float [[TMP1]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  tail call void @hello(float* %a, float* %c)
-  %0 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 7
-  store float %0, float* %arrayidx, align 4
+  tail call void @hello(ptr %a, ptr %c)
+  %0 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 7
+  store float %0, ptr %arrayidx, align 4
   ret void
 }
 
-define void @fooa(float* nocapture align 128 %a, float* nocapture readonly %c) #0 {
+define void @fooa(ptr nocapture align 128 %a, ptr nocapture readonly %c) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@fooa
-; CHECK-SAME: (float* nocapture align 128 [[A:%.*]], float* nocapture readonly [[C:%.*]]) #0
+; CHECK-SAME: (ptr nocapture align 128 [[A:%.*]], ptr nocapture readonly [[C:%.*]]) #0
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
-; CHECK-NEXT:    store float [[TMP1]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, ptr [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX_I]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
+; CHECK-NEXT:    store float [[TMP1]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  tail call void @hello(float* %a, float* %c)
-  %0 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 7
-  store float %0, float* %arrayidx, align 4
+  tail call void @hello(ptr %a, ptr %c)
+  %0 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 7
+  store float %0, ptr %arrayidx, align 4
   ret void
 }
 
-define void @hello2(float* align 128 nocapture %a, float* align 128 nocapture %b, float* nocapture readonly %c) #0 {
+define void @hello2(ptr align 128 nocapture %a, ptr align 128 nocapture %b, ptr nocapture readonly %c) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@hello2
-; CHECK-SAME: (float* nocapture align 128 [[A:%.*]], float* nocapture align 128 [[B:%.*]], float* nocapture readonly [[C:%.*]]) #0
+; CHECK-SAME: (ptr nocapture align 128 [[A:%.*]], ptr nocapture align 128 [[B:%.*]], ptr nocapture readonly [[C:%.*]]) #0
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX]], align 4
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 5
-  store float %0, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %b, i64 8
-  store float %0, float* %arrayidx1, align 4
+  %0 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 5
+  store float %0, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %b, i64 8
+  store float %0, ptr %arrayidx1, align 4
   ret void
 }
 
-define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
+define void @foo2(ptr nocapture %a, ptr nocapture %b, ptr nocapture readonly %c) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@foo2
-; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture [[B:%.*]], float* nocapture readonly [[C:%.*]]) #0
+; CHECK-SAME: (ptr nocapture [[A:%.*]], ptr nocapture [[B:%.*]], ptr nocapture readonly [[C:%.*]]) #0
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[A]], i64 128) ]
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[B]], i64 128) ]
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
-; CHECK-NEXT:    [[ARRAYIDX1_I:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX1_I]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
-; CHECK-NEXT:    store float [[TMP1]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 128) ]
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[B]], i64 128) ]
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, ptr [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX_I]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1_I:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX1_I]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
+; CHECK-NEXT:    store float [[TMP1]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  tail call void @hello2(float* %a, float* %b, float* %c)
-  %0 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 7
-  store float %0, float* %arrayidx, align 4
+  tail call void @hello2(ptr %a, ptr %b, ptr %c)
+  %0 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 7
+  store float %0, ptr %arrayidx, align 4
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/alloca-in-scc.ll b/llvm/test/Transforms/Inline/alloca-in-scc.ll
index 71b6ba8f57327..f8a8891e84b09 100644
--- a/llvm/test/Transforms/Inline/alloca-in-scc.ll
+++ b/llvm/test/Transforms/Inline/alloca-in-scc.ll
@@ -3,7 +3,7 @@
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "i386-apple-darwin10.0"
 
-define i32 @main(i32 %argc, i8** %argv) nounwind ssp {
+define i32 @main(i32 %argc, ptr %argv) nounwind ssp {
 entry:
   call fastcc void @c() nounwind
   unreachable
@@ -12,7 +12,7 @@ entry:
 define internal fastcc void @a() nounwind ssp {
 entry:
   %al = alloca [3 x i32], align 4
-  %0 = getelementptr inbounds [3 x i32], [3 x i32]* %al, i32 0, i32 2
+  %0 = getelementptr inbounds [3 x i32], ptr %al, i32 0, i32 2
 
   call fastcc void @c() nounwind
   unreachable

diff  --git a/llvm/test/Transforms/Inline/alloca_test.ll b/llvm/test/Transforms/Inline/alloca_test.ll
index 5712ecc3a9ffd..e4a90799fa326 100644
--- a/llvm/test/Transforms/Inline/alloca_test.ll
+++ b/llvm/test/Transforms/Inline/alloca_test.ll
@@ -7,7 +7,7 @@
 
 define i32 @func(i32 %i) {
   %X = alloca i32
-  store i32 %i, i32* %X
+  store i32 %i, ptr %X
   ret i32 %i
 }
 
@@ -41,16 +41,16 @@ define void @PR27277(i32 %p1) {
 
 ; Don't assume that the size is a ConstantInt (a ConstExpr is also a constant).
 
-@GV = common global i32* null
+@GV = common global ptr null
 
 define void @PR27277_part2(i32 %p1) {
 ; CHECK-LABEL: @PR27277_part2(
 ; CHECK-NEXT:    [[VLA:%.*]] = alloca double, i32 %p1
-; CHECK-NEXT:    call void @PR27277_part2(i32 ptrtoint (i32** @GV to i32))
+; CHECK-NEXT:    call void @PR27277_part2(i32 ptrtoint (ptr @GV to i32))
 ; CHECK-NEXT:    ret void
 ;
   %vla = alloca double, i32 %p1
-  call void @PR27277_part2(i32 ptrtoint (i32** @GV to i32))
+  call void @PR27277_part2(i32 ptrtoint (ptr @GV to i32))
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/always-inline-attr.ll b/llvm/test/Transforms/Inline/always-inline-attr.ll
index d411b96252196..08ea3079d3ff2 100644
--- a/llvm/test/Transforms/Inline/always-inline-attr.ll
+++ b/llvm/test/Transforms/Inline/always-inline-attr.ll
@@ -6,29 +6,29 @@ target triple = "x86_64-grtev4-linux-gnu"
 
 ; After AlwaysInline the callee's attributes should be merged into caller's attibutes.
 
-; CHECK:  define dso_local <2 x i64> @foo(<8 x i64>* byval(<8 x i64>) align 64 %0) #0
+; CHECK:  define dso_local <2 x i64> @foo(ptr byval(<8 x i64>) align 64 %0) #0
 ; CHECK:  attributes #0 = { mustprogress uwtable "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="512"
 
 ; Function Attrs: uwtable mustprogress
-define dso_local <2 x i64> @foo(<8 x i64>* byval(<8 x i64>) align 64 %0) #0 {
+define dso_local <2 x i64> @foo(ptr byval(<8 x i64>) align 64 %0) #0 {
 entry:
   %kBias.addr = alloca <8 x i64>, align 64
   %indirect-arg-temp = alloca <8 x i64>, align 64
-  %kBias = load <8 x i64>, <8 x i64>* %0, align 64, !tbaa !2
-  store <8 x i64> %kBias, <8 x i64>* %kBias.addr, align 64, !tbaa !2
-  %1 = load <8 x i64>, <8 x i64>* %kBias.addr, align 64, !tbaa !2
-  store <8 x i64> %1, <8 x i64>* %indirect-arg-temp, align 64, !tbaa !2
-  %call = call <2 x i64> @bar(<8 x i64>* byval(<8 x i64>) align 64 %indirect-arg-temp)
+  %kBias = load <8 x i64>, ptr %0, align 64, !tbaa !2
+  store <8 x i64> %kBias, ptr %kBias.addr, align 64, !tbaa !2
+  %1 = load <8 x i64>, ptr %kBias.addr, align 64, !tbaa !2
+  store <8 x i64> %1, ptr %indirect-arg-temp, align 64, !tbaa !2
+  %call = call <2 x i64> @bar(ptr byval(<8 x i64>) align 64 %indirect-arg-temp)
   ret <2 x i64> %call
 }
 
 ; Function Attrs: alwaysinline nounwind uwtable mustprogress
-define internal <2 x i64> @bar(<8 x i64>* byval(<8 x i64>) align 64 %0) #1 {
+define internal <2 x i64> @bar(ptr byval(<8 x i64>) align 64 %0) #1 {
 entry:
   %__A.addr = alloca <8 x i64>, align 64
-  %__A = load <8 x i64>, <8 x i64>* %0, align 64, !tbaa !2
-  store <8 x i64> %__A, <8 x i64>* %__A.addr, align 64, !tbaa !2
-  %1 = load <8 x i64>, <8 x i64>* %__A.addr, align 64, !tbaa !2
+  %__A = load <8 x i64>, ptr %0, align 64, !tbaa !2
+  store <8 x i64> %__A, ptr %__A.addr, align 64, !tbaa !2
+  %1 = load <8 x i64>, ptr %__A.addr, align 64, !tbaa !2
   %2 = bitcast <8 x i64> %1 to <16 x i32>
   %3 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32> %2, <16 x i8> zeroinitializer, i16 -1)
   %4 = bitcast <16 x i8> %3 to <2 x i64>

diff  --git a/llvm/test/Transforms/Inline/always-inline-remark.ll b/llvm/test/Transforms/Inline/always-inline-remark.ll
index e328070589445..deac32da843a4 100644
--- a/llvm/test/Transforms/Inline/always-inline-remark.ll
+++ b/llvm/test/Transforms/Inline/always-inline-remark.ll
@@ -7,11 +7,11 @@ define void @foo() alwaysinline {
     ret void
 }
 
-define void @bar() alwaysinline personality void ()* @personalityFn1 {
+define void @bar() alwaysinline personality ptr @personalityFn1 {
     ret void
 }
 
-define void @goo() personality void ()* @personalityFn2 {
+define void @goo() personality ptr @personalityFn2 {
     ; CHECK-DAG: remark: {{.*}}: 'bar' is not inlined into 'goo': incompatible personality
     call void @bar()
     ; CHECK-DAG: remark: {{.*}}: 'foo' is not inlined into 'goo': unsupported operand bundle

diff  --git a/llvm/test/Transforms/Inline/always-inline.ll b/llvm/test/Transforms/Inline/always-inline.ll
index d84b7e7a62036..599b2d4de2619 100644
--- a/llvm/test/Transforms/Inline/always-inline.ll
+++ b/llvm/test/Transforms/Inline/always-inline.ll
@@ -83,10 +83,10 @@ entry:
 }
 
 ; We can't inline this even though it has alwaysinline!
-define internal i32 @inner5(i8* %addr) alwaysinline {
+define internal i32 @inner5(ptr %addr) alwaysinline {
 ; CHECK-LABEL: @inner5(
 entry:
-  indirectbr i8* %addr, [ label %one, label %two ]
+  indirectbr ptr %addr, [ label %one, label %two ]
 
 one:
   ret i32 42
@@ -100,8 +100,8 @@ define i32 @outer5(i32 %x) {
 ; CHECK: ret
 
   %cmp = icmp slt i32 %x, 42
-  %addr = select i1 %cmp, i8* blockaddress(@inner5, %one), i8* blockaddress(@inner5, %two)
-  %call = call i32 @inner5(i8* %addr)
+  %addr = select i1 %cmp, ptr blockaddress(@inner5, %one), ptr blockaddress(@inner5, %two)
+  %call = call i32 @inner5(ptr %addr)
   ret i32 %call
 }
 
@@ -143,17 +143,17 @@ define i32 @outer7() {
    ret i32 %r
 }
 
-define internal float* @inner8(float* nocapture align 128 %a) alwaysinline {
+define internal ptr @inner8(ptr nocapture align 128 %a) alwaysinline {
 ; CHECK-NOT: @inner8(
-  ret float* %a
+  ret ptr %a
 }
-define float @outer8(float* nocapture %a) {
+define float @outer8(ptr nocapture %a) {
 ; CHECK-LABEL: @outer8(
-; CHECK-NOT: call float* @inner8
+; CHECK-NOT: call ptr @inner8
 ; CHECK: ret
 
-  %inner_a = call float* @inner8(float* %a)
-  %f = load float, float* %inner_a, align 4
+  %inner_a = call ptr @inner8(ptr %a)
+  %f = load float, ptr %inner_a, align 4
   ret float %f
 }
 
@@ -188,7 +188,7 @@ entry:
   ; this the function can't be deleted because of the constant expression
   ; usage.
   %sink = alloca i1
-  store volatile i1 icmp eq (i64 ptrtoint (void (i1)* @inner9a to i64), i64 ptrtoint(void (i1)* @dummy9 to i64)), i1* %sink
+  store volatile i1 icmp eq (i64 ptrtoint (ptr @inner9a to i64), i64 ptrtoint(ptr @dummy9 to i64)), ptr %sink
 ; CHECK: store volatile
   call void @inner9a(i1 false)
 ; CHECK-NOT: call void @inner9a
@@ -196,7 +196,7 @@ entry:
   ; Next we call @inner9b passing in a constant expression. This constant
   ; expression will in fact be removed by inlining, so we should also be able
   ; to delete the function.
-  call void @inner9b(i1 icmp eq (i64 ptrtoint (void (i1)* @inner9b to i64), i64 ptrtoint(void (i1)* @dummy9 to i64)))
+  call void @inner9b(i1 icmp eq (i64 ptrtoint (ptr @inner9b to i64), i64 ptrtoint(ptr @dummy9 to i64)))
 ; CHECK-NOT: @inner9b
 
   ret void

diff  --git a/llvm/test/Transforms/Inline/arg-attr-propagation.ll b/llvm/test/Transforms/Inline/arg-attr-propagation.ll
index 3cacda9357655..7b096539e7e1b 100644
--- a/llvm/test/Transforms/Inline/arg-attr-propagation.ll
+++ b/llvm/test/Transforms/Inline/arg-attr-propagation.ll
@@ -5,74 +5,74 @@
 ; The callee guarantees that the pointer argument is nonnull and dereferenceable.
 ; That information should transfer to the caller.
 
-define i32 @callee(i32* dereferenceable(32) %t1) {
+define i32 @callee(ptr dereferenceable(32) %t1) {
 ; CHECK-LABEL: define {{[^@]+}}@callee
-; CHECK-SAME: (i32* dereferenceable(32) [[T1:%.*]]) {
-; CHECK-NEXT:    [[T2:%.*]] = load i32, i32* [[T1]], align 4
+; CHECK-SAME: (ptr dereferenceable(32) [[T1:%.*]]) {
+; CHECK-NEXT:    [[T2:%.*]] = load i32, ptr [[T1]], align 4
 ; CHECK-NEXT:    ret i32 [[T2]]
 ;
-  %t2 = load i32, i32* %t1
+  %t2 = load i32, ptr %t1
   ret i32 %t2
 }
 
-define i32 @callee2(i32* dereferenceable(32) %t1, i32 noundef %t2) {
+define i32 @callee2(ptr dereferenceable(32) %t1, i32 noundef %t2) {
 ; CHECK-LABEL: define {{[^@]+}}@callee2
-; CHECK-SAME: (i32* dereferenceable(32) [[T1:%.*]], i32 noundef [[T2:%.*]]) {
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[T1]], align 4
+; CHECK-SAME: (ptr dereferenceable(32) [[T1:%.*]], i32 noundef [[T2:%.*]]) {
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[T1]], align 4
 ; CHECK-NEXT:    ret i32 [[V]]
 ;
-  %v = load i32, i32* %t1
+  %v = load i32, ptr %t1
   ret i32 %v
 }
 
 ; FIXME: All dereferenceability information is lost.
 ; The caller argument could be known nonnull and dereferenceable(32).
 
-define i32 @caller1(i32* %t1, i32 %t2) {
+define i32 @caller1(ptr %t1, i32 %t2) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@caller1
-; NO_ASSUME-SAME: (i32* [[T1:%.*]], i32 [[T2:%.*]]) {
-; NO_ASSUME-NEXT:    [[V_I:%.*]] = load i32, i32* [[T1]], align 4
+; NO_ASSUME-SAME: (ptr [[T1:%.*]], i32 [[T2:%.*]]) {
+; NO_ASSUME-NEXT:    [[V_I:%.*]] = load i32, ptr [[T1]], align 4
 ; NO_ASSUME-NEXT:    ret i32 [[V_I]]
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@caller1
-; USE_ASSUME-SAME: (i32* [[T1:%.*]], i32 [[T2:%.*]]) {
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[T1]], i64 32), "noundef"(i32 [[T2]]) ]
-; USE_ASSUME-NEXT:    [[V_I:%.*]] = load i32, i32* [[T1]], align 4
+; USE_ASSUME-SAME: (ptr [[T1:%.*]], i32 [[T2:%.*]]) {
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[T1]], i64 32), "noundef"(i32 [[T2]]) ]
+; USE_ASSUME-NEXT:    [[V_I:%.*]] = load i32, ptr [[T1]], align 4
 ; USE_ASSUME-NEXT:    ret i32 [[V_I]]
 ;
-  %v = tail call i32 @callee2(i32* dereferenceable(32) %t1, i32 noundef %t2)
+  %v = tail call i32 @callee2(ptr dereferenceable(32) %t1, i32 noundef %t2)
   ret i32 %v
 }
 
 ; The caller argument is nonnull, but that can be explicit.
 ; The dereferenceable amount could be increased.
 
-define i32 @caller2(i32* dereferenceable(31) %t1) {
+define i32 @caller2(ptr dereferenceable(31) %t1) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@caller2
-; NO_ASSUME-SAME: (i32* dereferenceable(31) [[T1:%.*]])
-; NO_ASSUME-NEXT:    [[T2_I:%.*]] = load i32, i32* [[T1]]
+; NO_ASSUME-SAME: (ptr dereferenceable(31) [[T1:%.*]])
+; NO_ASSUME-NEXT:    [[T2_I:%.*]] = load i32, ptr [[T1]]
 ; NO_ASSUME-NEXT:    ret i32 [[T2_I]]
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@caller2
-; USE_ASSUME-SAME: (i32* dereferenceable(31) [[T1:%.*]])
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[T1]], i64 32) ]
-; USE_ASSUME-NEXT:    [[T2_I:%.*]] = load i32, i32* [[T1]]
+; USE_ASSUME-SAME: (ptr dereferenceable(31) [[T1:%.*]])
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[T1]], i64 32) ]
+; USE_ASSUME-NEXT:    [[T2_I:%.*]] = load i32, ptr [[T1]]
 ; USE_ASSUME-NEXT:    ret i32 [[T2_I]]
 ;
-  %t2 = tail call i32 @callee(i32* dereferenceable(32) %t1)
+  %t2 = tail call i32 @callee(ptr dereferenceable(32) %t1)
   ret i32 %t2
 }
 
 ; The caller argument is nonnull, but that can be explicit.
 ; Make sure that we don't propagate a smaller dereferenceable amount.
 
-define i32 @caller3(i32* dereferenceable(33) %t1) {
+define i32 @caller3(ptr dereferenceable(33) %t1) {
 ; CHECK-LABEL: define {{[^@]+}}@caller3
-; CHECK-SAME: (i32* dereferenceable(33) [[T1:%.*]])
-; CHECK-NEXT:    [[T2_I:%.*]] = load i32, i32* [[T1]]
+; CHECK-SAME: (ptr dereferenceable(33) [[T1:%.*]])
+; CHECK-NEXT:    [[T2_I:%.*]] = load i32, ptr [[T1]]
 ; CHECK-NEXT:    ret i32 [[T2_I]]
 ;
-  %t2 = tail call i32 @callee(i32* dereferenceable(32) %t1)
+  %t2 = tail call i32 @callee(ptr dereferenceable(32) %t1)
   ret i32 %t2
 }
 

diff  --git a/llvm/test/Transforms/Inline/basictest.ll b/llvm/test/Transforms/Inline/basictest.ll
index ebe7162887e1d..c8caa592415c5 100644
--- a/llvm/test/Transforms/Inline/basictest.ll
+++ b/llvm/test/Transforms/Inline/basictest.ll
@@ -22,24 +22,22 @@ define i32 @test1(i32 %W) {
 %T = type { i32, i32 }
 
 ; CHECK-NOT: @test2f(
-define internal %T* @test2f(i1 %cond, %T* %P) {
+define internal ptr @test2f(i1 %cond, ptr %P) {
   br i1 %cond, label %T, label %F
 
 T:
-  %A = getelementptr %T, %T* %P, i32 0, i32 0
-  store i32 42, i32* %A
-  ret %T* %P
+  store i32 42, ptr %P
+  ret ptr %P
 
 F:
-  ret %T* %P
+  ret ptr %P
 }
 
 define i32 @test2(i1 %cond) {
   %A = alloca %T
 
-  %B = call %T* @test2f(i1 %cond, %T* %A)
-  %C = getelementptr %T, %T* %B, i32 0, i32 0
-  %D = load i32, i32* %C
+  %B = call ptr @test2f(i1 %cond, ptr %A)
+  %D = load i32, ptr %B
   ret i32 %D
 
 ; CHECK-LABEL: @test2(
@@ -94,7 +92,7 @@ define i32 @test() {
 
 ; Inliner shouldn't delete calls it can't inline, even if they're trivially dead
 ; CHECK-LABEL: @outer4(
-define void @outer4(void ()* %inner4) {
+define void @outer4(ptr %inner4) {
 entry:
 ; CHECK: call void %inner4()
   call void %inner4() nounwind readnone
@@ -103,7 +101,7 @@ entry:
 
 declare void @inner5_inner()
 
-define void @inner5(void ()* %x) {
+define void @inner5(ptr %x) {
   call void %x() nounwind readnone
   ret void
 }
@@ -112,6 +110,6 @@ define void @inner5(void ()* %x) {
 ; CHECK-LABEL: @outer5(
 define void @outer5() {
 ; CHECK: call void @inner5_inner(
-  call void @inner5(void ()* @inner5_inner)
+  call void @inner5(ptr @inner5_inner)
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/bfi-update.ll b/llvm/test/Transforms/Inline/bfi-update.ll
index 94584e2e6ce55..3f3bde12b2a04 100644
--- a/llvm/test/Transforms/Inline/bfi-update.ll
+++ b/llvm/test/Transforms/Inline/bfi-update.ll
@@ -54,7 +54,7 @@ define i32 @e(i32 %c1) !prof !24 {
 
 cond_false:
   call void @ext()
-  %c2 = load i32, i32* @data, align 4
+  %c2 = load i32, ptr @data, align 4
   %c3 = add i32 %c1, %c2
   %c4 = mul i32 %c3, %c2
   %c5 = add i32 %c4, %c2

diff  --git a/llvm/test/Transforms/Inline/blockaddress.ll b/llvm/test/Transforms/Inline/blockaddress.ll
index 37419aff2890c..aded280c1ed6c 100644
--- a/llvm/test/Transforms/Inline/blockaddress.ll
+++ b/llvm/test/Transforms/Inline/blockaddress.ll
@@ -4,18 +4,18 @@
 
 ; Make sure doit is not inlined since the blockaddress is taken
 ; which could be unsafe
-; CHECK: store i8* blockaddress(@doit, %here), i8** %pptr, align 8
+; CHECK: store ptr blockaddress(@doit, %here), ptr %pptr, align 8
 
 @i = global i32 1, align 4
-@ptr1 = common global i8* null, align 8
+@ptr1 = common global ptr null, align 8
 
-define void @doit(i8** nocapture %pptr, i32 %cond) nounwind uwtable {
+define void @doit(ptr nocapture %pptr, i32 %cond) nounwind uwtable {
 entry:
   %tobool = icmp eq i32 %cond, 0
   br i1 %tobool, label %if.end, label %here
 
 here:
-  store i8* blockaddress(@doit, %here), i8** %pptr, align 8
+  store ptr blockaddress(@doit, %here), ptr %pptr, align 8
   br label %if.end
 
 if.end:
@@ -24,14 +24,14 @@ if.end:
 
 define void @f(i32 %cond) nounwind uwtable {
 entry:
-  call void @doit(i8** @ptr1, i32 %cond)
+  call void @doit(ptr @ptr1, i32 %cond)
   ret void
 }
 
 ; PR27233: We can inline @run into @init.  Don't crash on it.
 ;
 ; CHECK-LABEL: define void @init
-; CHECK:         store i8* blockaddress(@run, %bb)
+; CHECK:         store ptr blockaddress(@run, %bb)
 ; CHECK-SAME:        @run.bb
 define void @init() {
 entry:
@@ -41,41 +41,41 @@ entry:
 
 define void @run() {
 entry:
-  store i8* blockaddress(@run, %bb), i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @run.bb, i64 0, i64 0), align 8
+  store ptr blockaddress(@run, %bb), ptr @run.bb, align 8
   ret void
 
 bb:
   unreachable
 }
 
-@run.bb = global [1 x i8*] zeroinitializer
+@run.bb = global [1 x ptr] zeroinitializer
 
 ; Check that a function referenced by a global blockaddress wont be inlined,
 ; even if it contains a callbr. We might be able to relax this in the future
 ; as long as the global blockaddress is updated correctly.
-@ba = internal global i8* blockaddress(@foo, %7), align 8
+@ba = internal global ptr blockaddress(@foo, %7), align 8
 define internal i32 @foo(i32) {
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
-  store i32 %0, i32* %3, align 4
-  %4 = load i32, i32* %3, align 4
+  store i32 %0, ptr %3, align 4
+  %4 = load i32, ptr %3, align 4
   callbr void asm sideeffect "testl $0, $0; jne ${1:l};", "r,!i,!i,~{dirflag},~{fpsr},~{flags}"(i32 %4) #1
           to label %5 [label %7, label %6]
 
 ; <label>:5:                                      ; preds = %1
-  store i32 0, i32* %2, align 4
+  store i32 0, ptr %2, align 4
   br label %8
 
 ; <label>:6:                                      ; preds = %1
-  store i32 1, i32* %2, align 4
+  store i32 1, ptr %2, align 4
   br label %8
 
 ; <label>:7:                                      ; preds = %1
-  store i32 2, i32* %2, align 4
+  store i32 2, ptr %2, align 4
   br label %8
 
 ; <label>:8:                                      ; preds = %7, %6, %5
-  %9 = load i32, i32* %2, align 4
+  %9 = load i32, ptr %2, align 4
   ret i32 %9
 }
 define dso_local i32 @bar() {
@@ -91,32 +91,32 @@ define dso_local i32 @bar() {
 ; Triple check that even with a global aggregate whose member is a blockaddress,
 ; we still don't inline referred to functions.
 
-%struct.foo = type { i8* }
+%struct.foo = type { ptr }
 
-@my_foo = dso_local global %struct.foo { i8* blockaddress(@baz, %7) }
+@my_foo = dso_local global %struct.foo { ptr blockaddress(@baz, %7) }
 
 define internal i32 @baz(i32) {
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
-  store i32 %0, i32* %3, align 4
-  %4 = load i32, i32* %3, align 4
+  store i32 %0, ptr %3, align 4
+  %4 = load i32, ptr %3, align 4
   callbr void asm sideeffect "testl $0, $0; jne ${1:l};", "r,!i,!i,~{dirflag},~{fpsr},~{flags}"(i32 %4) #1
           to label %5 [label %7, label %6]
 
 ; <label>:5:                                      ; preds = %1
-  store i32 0, i32* %2, align 4
+  store i32 0, ptr %2, align 4
   br label %8
 
 ; <label>:6:                                      ; preds = %1
-  store i32 1, i32* %2, align 4
+  store i32 1, ptr %2, align 4
   br label %8
 
 ; <label>:7:                                      ; preds = %1
-  store i32 2, i32* %2, align 4
+  store i32 2, ptr %2, align 4
   br label %8
 
 ; <label>:8:                                      ; preds = %7, %6, %5
-  %9 = load i32, i32* %2, align 4
+  %9 = load i32, ptr %2, align 4
   ret i32 %9
 }
 define dso_local i32 @quux() {

diff  --git a/llvm/test/Transforms/Inline/byref-align.ll b/llvm/test/Transforms/Inline/byref-align.ll
index 92b16398ba0f3..128a3c9148b75 100644
--- a/llvm/test/Transforms/Inline/byref-align.ll
+++ b/llvm/test/Transforms/Inline/byref-align.ll
@@ -6,43 +6,43 @@ target triple = "x86_64-unknown-linux-gnu"
 ; Test behavior of inserted alignment assumptions with byref. There is
 ; no implied copy to a higher alignment, so an alignment assume call
 ; should be inserted.
-define void @byref_callee(float* align(128) byref(float) nocapture %a, float* %b) #0 {
+define void @byref_callee(ptr align(128) byref(float) nocapture %a, ptr %b) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@byref_callee
-; CHECK-SAME: (float* nocapture byref(float) align 128 [[A:%.*]], float* [[B:%.*]]) [[ATTR0:#.*]] {
+; CHECK-SAME: (ptr nocapture byref(float) align 128 [[A:%.*]], ptr [[B:%.*]]) [[ATTR0:#.*]] {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[LOAD:%.*]] = load float, float* [[A]], align 4
-; CHECK-NEXT:    [[B_IDX:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr [[A]], align 4
+; CHECK-NEXT:    [[B_IDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
 ; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[LOAD]], 2.000000e+00
-; CHECK-NEXT:    store float [[ADD]], float* [[B_IDX]], align 4
+; CHECK-NEXT:    store float [[ADD]], ptr [[B_IDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %load = load float, float* %a, align 4
-  %b.idx = getelementptr inbounds float, float* %b, i64 8
+  %load = load float, ptr %a, align 4
+  %b.idx = getelementptr inbounds float, ptr %b, i64 8
   %add = fadd float %load, 2.0
-  store float %add, float* %b.idx, align 4
+  store float %add, ptr %b.idx, align 4
   ret void
 }
 
-define void @byref_caller(float* nocapture align 64 %a, float* %b) #0 {
+define void @byref_caller(ptr nocapture align 64 %a, ptr %b) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@byref_caller
-; CHECK-SAME: (float* nocapture align 64 [[A:%.*]], float* [[B:%.*]]) [[ATTR0]] {
+; CHECK-SAME: (ptr nocapture align 64 [[A:%.*]], ptr [[B:%.*]]) [[ATTR0]] {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[A]], i64 128) ]
-; CHECK-NEXT:    [[LOAD_I:%.*]] = load float, float* [[A]], align 4
-; CHECK-NEXT:    [[B_IDX_I:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 128) ]
+; CHECK-NEXT:    [[LOAD_I:%.*]] = load float, ptr [[A]], align 4
+; CHECK-NEXT:    [[B_IDX_I:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
 ; CHECK-NEXT:    [[ADD_I:%.*]] = fadd float [[LOAD_I]], 2.000000e+00
-; CHECK-NEXT:    store float [[ADD_I]], float* [[B_IDX_I]], align 4
-; CHECK-NEXT:    [[CALLER_LOAD:%.*]] = load float, float* [[B]], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
-; CHECK-NEXT:    store float [[CALLER_LOAD]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    store float [[ADD_I]], ptr [[B_IDX_I]], align 4
+; CHECK-NEXT:    [[CALLER_LOAD:%.*]] = load float, ptr [[B]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
+; CHECK-NEXT:    store float [[CALLER_LOAD]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  call void @byref_callee(float* align(128) byref(float) %a, float* %b)
-  %caller.load = load float, float* %b, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 7
-  store float %caller.load, float* %arrayidx, align 4
+  call void @byref_callee(ptr align(128) byref(float) %a, ptr %b)
+  %caller.load = load float, ptr %b, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 7
+  store float %caller.load, ptr %arrayidx, align 4
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/byval-align.ll b/llvm/test/Transforms/Inline/byval-align.ll
index 045b17c1572fe..11a3330fc6d7a 100644
--- a/llvm/test/Transforms/Inline/byval-align.ll
+++ b/llvm/test/Transforms/Inline/byval-align.ll
@@ -5,50 +5,46 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; Test behavior of inserted alignment assumptions with byval. No
 ; assume should be inserted.
-define void @byval_callee(float* byval(float) align 128 nocapture %a, float* %b) #0 {
+define void @byval_callee(ptr byval(float) align 128 nocapture %a, ptr %b) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@byval_callee
-; CHECK-SAME: (float* nocapture byval(float) align 128 [[A:%.*]], float* [[B:%.*]]) #0
+; CHECK-SAME: (ptr nocapture byval(float) align 128 [[A:%.*]], ptr [[B:%.*]]) #0
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[LOAD:%.*]] = load float, float* [[A]], align 4
-; CHECK-NEXT:    [[B_IDX:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr [[A]], align 4
+; CHECK-NEXT:    [[B_IDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
 ; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[LOAD]], 2.000000e+00
-; CHECK-NEXT:    store float [[ADD]], float* [[B_IDX]], align 4
+; CHECK-NEXT:    store float [[ADD]], ptr [[B_IDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %load = load float, float* %a, align 4
-  %b.idx = getelementptr inbounds float, float* %b, i64 8
+  %load = load float, ptr %a, align 4
+  %b.idx = getelementptr inbounds float, ptr %b, i64 8
   %add = fadd float %load, 2.0
-  store float %add, float* %b.idx, align 4
+  store float %add, ptr %b.idx, align 4
   ret void
 }
 
-define void @byval_caller(float* nocapture align 64 %a, float* %b) #0 {
+define void @byval_caller(ptr nocapture align 64 %a, ptr %b) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@byval_caller
-; CHECK-SAME: (float* nocapture align 64 [[A:%.*]], float* [[B:%.*]]) #0
+; CHECK-SAME: (ptr nocapture align 64 [[A:%.*]], ptr [[B:%.*]]) #0
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A1:%.*]] = alloca float, align 128
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[A1]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP0]])
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[A1]] to i8*
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[A]] to i8*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[TMP1]], i8* align 1 [[TMP2]], i64 4, i1 false)
-; CHECK-NEXT:    [[LOAD_I:%.*]] = load float, float* [[A1]], align 4
-; CHECK-NEXT:    [[B_IDX_I:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[A1]])
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[A1]], ptr align 1 [[A]], i64 4, i1 false)
+; CHECK-NEXT:    [[LOAD_I:%.*]] = load float, ptr [[A1]], align 4
+; CHECK-NEXT:    [[B_IDX_I:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
 ; CHECK-NEXT:    [[ADD_I:%.*]] = fadd float [[LOAD_I]], 2.000000e+00
-; CHECK-NEXT:    store float [[ADD_I]], float* [[B_IDX_I]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float* [[A1]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 4, i8* [[TMP3]])
-; CHECK-NEXT:    [[CALLER_LOAD:%.*]] = load float, float* [[B]], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
-; CHECK-NEXT:    store float [[CALLER_LOAD]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    store float [[ADD_I]], ptr [[B_IDX_I]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[A1]])
+; CHECK-NEXT:    [[CALLER_LOAD:%.*]] = load float, ptr [[B]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
+; CHECK-NEXT:    store float [[CALLER_LOAD]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  call void @byval_callee(float* byval(float) align 128 %a, float* %b)
-  %caller.load = load float, float* %b, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 7
-  store float %caller.load, float* %arrayidx, align 4
+  call void @byval_callee(ptr byval(float) align 128 %a, ptr %b)
+  %caller.load = load float, ptr %b, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 7
+  store float %caller.load, ptr %arrayidx, align 4
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/byval-tail-call.ll b/llvm/test/Transforms/Inline/byval-tail-call.ll
index 4d6ef5b97dbb4..808104c591791 100644
--- a/llvm/test/Transforms/Inline/byval-tail-call.ll
+++ b/llvm/test/Transforms/Inline/byval-tail-call.ll
@@ -8,97 +8,89 @@
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
 target triple = "i386-pc-linux-gnu"
 
-declare void @ext(i32*)
+declare void @ext(ptr)
 
-define void @bar(i32* byval(i32) %x) {
+define void @bar(ptr byval(i32) %x) {
 ; CHECK-LABEL: @bar(
-; CHECK-NEXT:    call void @ext(i32* nonnull [[X:%.*]])
+; CHECK-NEXT:    call void @ext(ptr nonnull [[X:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @ext(i32* %x)
+  call void @ext(ptr %x)
   ret void
 }
 
-define void @foo(i32* %x) {
+define void @foo(ptr %x) {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:    [[X1:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[X1]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[TMP1]])
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X:%.*]], align 1
-; CHECK-NEXT:    store i32 [[TMP2]], i32* [[X1]], align 4
-; CHECK-NEXT:    call void @ext(i32* nonnull [[X1]])
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[TMP3]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[X:%.*]], align 1
+; CHECK-NEXT:    store i32 [[TMP2]], ptr [[X1]], align 4
+; CHECK-NEXT:    call void @ext(ptr nonnull [[X1]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @bar(i32* byval(i32) %x)
+  call void @bar(ptr byval(i32) %x)
   ret void
 }
 
-define internal void @qux(i32* byval(i32) %x) {
-  call void @ext(i32* %x)
-  tail call void @ext(i32* null)
+define internal void @qux(ptr byval(i32) %x) {
+  call void @ext(ptr %x)
+  tail call void @ext(ptr null)
   ret void
 }
 
-define void @frob(i32* %x) {
+define void @frob(ptr %x) {
 ; CHECK-LABEL: @frob(
 ; CHECK-NEXT:    [[X1:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[X1]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[TMP1]])
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X:%.*]], align 1
-; CHECK-NEXT:    store i32 [[TMP2]], i32* [[X1]], align 4
-; CHECK-NEXT:    call void @ext(i32* nonnull [[X1]])
-; CHECK-NEXT:    tail call void @ext(i32* null)
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[TMP3]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[X:%.*]], align 1
+; CHECK-NEXT:    store i32 [[TMP2]], ptr [[X1]], align 4
+; CHECK-NEXT:    call void @ext(ptr nonnull [[X1]])
+; CHECK-NEXT:    tail call void @ext(ptr null)
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]])
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @qux(i32* byval(i32) %x)
+  tail call void @qux(ptr byval(i32) %x)
   ret void
 }
 
 ; A byval parameter passed into a function which is passed out as byval does
 ; not block the call from being marked as tail.
 
-declare void @ext2(i32* byval(i32))
+declare void @ext2(ptr byval(i32))
 
-define void @bar2(i32* byval(i32) %x) {
+define void @bar2(ptr byval(i32) %x) {
 ; CHECK-LABEL: @bar2(
-; CHECK-NEXT:    tail call void @ext2(i32* nonnull byval(i32) [[X:%.*]])
+; CHECK-NEXT:    tail call void @ext2(ptr nonnull byval(i32) [[X:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @ext2(i32* byval(i32) %x)
+  call void @ext2(ptr byval(i32) %x)
   ret void
 }
 
-define void @foobar(i32* %x) {
+define void @foobar(ptr %x) {
 ; CHECK-LABEL: @foobar(
 ; CHECK-NEXT:    [[X1:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[X1]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[TMP1]])
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[X:%.*]], align 1
-; CHECK-NEXT:    store i32 [[TMP2]], i32* [[X1]], align 4
-; CHECK-NEXT:    tail call void @ext2(i32* nonnull byval(i32) [[X1]])
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[TMP3]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[X:%.*]], align 1
+; CHECK-NEXT:    store i32 [[TMP2]], ptr [[X1]], align 4
+; CHECK-NEXT:    tail call void @ext2(ptr nonnull byval(i32) [[X1]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]])
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @bar2(i32* byval(i32) %x)
+  tail call void @bar2(ptr byval(i32) %x)
   ret void
 }
 
 define void @barfoo() {
 ; CHECK-LABEL: @barfoo(
 ; CHECK-NEXT:    [[X1:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[X1]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[TMP1]])
-; CHECK-NEXT:    tail call void @ext2(i32* nonnull byval(i32) [[X1]])
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[X1]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[TMP2]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT:    tail call void @ext2(ptr nonnull byval(i32) [[X1]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]])
 ; CHECK-NEXT:    ret void
 ;
   %x = alloca i32
-  tail call void @bar2(i32* byval(i32) %x)
+  tail call void @bar2(ptr byval(i32) %x)
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/byval.ll b/llvm/test/Transforms/Inline/byval.ll
index 0fd6ada91d477..d7e6efcb1a2c3 100644
--- a/llvm/test/Transforms/Inline/byval.ll
+++ b/llvm/test/Transforms/Inline/byval.ll
@@ -11,27 +11,27 @@ target datalayout = "p:32:32-p1:64:64-p2:16:16-n16:32:64"
 ; Inlining a byval struct should cause an explicit copy into an alloca.
 
 	%struct.ss = type { i32, i64 }
- at .str = internal constant [10 x i8] c"%d, %lld\0A\00"		; <[10 x i8]*> [#uses=1]
+ at .str = internal constant [10 x i8] c"%d, %lld\0A\00"		; <ptr> [#uses=1]
 
-define internal void @f(%struct.ss* byval(%struct.ss)  %b) nounwind  {
+define internal void @f(ptr byval(%struct.ss)  %b) nounwind  {
 entry:
-	%tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0		; <i32*> [#uses=2]
-	%tmp1 = load i32, i32* %tmp, align 4		; <i32> [#uses=1]
+	%tmp = getelementptr %struct.ss, ptr %b, i32 0, i32 0		; <ptr> [#uses=2]
+	%tmp1 = load i32, ptr %tmp, align 4		; <i32> [#uses=1]
 	%tmp2 = add i32 %tmp1, 1		; <i32> [#uses=1]
-	store i32 %tmp2, i32* %tmp, align 4
+	store i32 %tmp2, ptr %tmp, align 4
 	ret void
 }
 
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind
 
 define i32 @test1() nounwind  {
 entry:
-	%S = alloca %struct.ss		; <%struct.ss*> [#uses=4]
-	%tmp1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0		; <i32*> [#uses=1]
-	store i32 1, i32* %tmp1, align 8
-	%tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1		; <i64*> [#uses=1]
-	store i64 2, i64* %tmp4, align 4
-	call void @f(%struct.ss* byval(%struct.ss) %S) nounwind
+	%S = alloca %struct.ss		; <ptr> [#uses=4]
+	%tmp1 = getelementptr %struct.ss, ptr %S, i32 0, i32 0		; <ptr> [#uses=1]
+	store i32 1, ptr %tmp1, align 8
+	%tmp4 = getelementptr %struct.ss, ptr %S, i32 0, i32 1		; <ptr> [#uses=1]
+	store i64 2, ptr %tmp4, align 4
+	call void @f(ptr byval(%struct.ss) %S) nounwind
 	ret i32 0
 ; CHECK: @test1()
 ; CHECK: %S1 = alloca %struct.ss
@@ -43,22 +43,22 @@ entry:
 ; Inlining a byval struct should NOT cause an explicit copy
 ; into an alloca if the function is readonly
 
-define internal i32 @f2(%struct.ss* byval(%struct.ss)  %b) nounwind readonly {
+define internal i32 @f2(ptr byval(%struct.ss)  %b) nounwind readonly {
 entry:
-	%tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0		; <i32*> [#uses=2]
-	%tmp1 = load i32, i32* %tmp, align 4		; <i32> [#uses=1]
+	%tmp = getelementptr %struct.ss, ptr %b, i32 0, i32 0		; <ptr> [#uses=2]
+	%tmp1 = load i32, ptr %tmp, align 4		; <i32> [#uses=1]
 	%tmp2 = add i32 %tmp1, 1		; <i32> [#uses=1]
 	ret i32 %tmp2
 }
 
 define i32 @test2() nounwind  {
 entry:
-	%S = alloca %struct.ss		; <%struct.ss*> [#uses=4]
-	%tmp1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0		; <i32*> [#uses=1]
-	store i32 1, i32* %tmp1, align 8
-	%tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1		; <i64*> [#uses=1]
-	store i64 2, i64* %tmp4, align 4
-	%X = call i32 @f2(%struct.ss* byval(%struct.ss) %S) nounwind
+	%S = alloca %struct.ss		; <ptr> [#uses=4]
+	%tmp1 = getelementptr %struct.ss, ptr %S, i32 0, i32 0		; <ptr> [#uses=1]
+	store i32 1, ptr %tmp1, align 8
+	%tmp4 = getelementptr %struct.ss, ptr %S, i32 0, i32 1		; <ptr> [#uses=1]
+	store i64 2, ptr %tmp4, align 4
+	%X = call i32 @f2(ptr byval(%struct.ss) %S) nounwind
 	ret i32 %X
 ; CHECK: @test2()
 ; CHECK: %S = alloca %struct.ss
@@ -70,23 +70,23 @@ entry:
 ; Inlining a byval with an explicit alignment needs to use *at least* that
 ; alignment on the generated alloca.
 ; PR8769
-declare void @g3(%struct.ss* %p)
+declare void @g3(ptr %p)
 
-define internal void @f3(%struct.ss* byval(%struct.ss) align 64 %b) nounwind {
-   call void @g3(%struct.ss* %b)  ;; Could make alignment assumptions!
+define internal void @f3(ptr byval(%struct.ss) align 64 %b) nounwind {
+   call void @g3(ptr %b)  ;; Could make alignment assumptions!
    ret void
 }
 
 define void @test3() nounwind  {
 entry:
 	%S = alloca %struct.ss, align 1  ;; May not be aligned.
-	call void @f3(%struct.ss* byval(%struct.ss) align 64 %S) nounwind
+	call void @f3(ptr byval(%struct.ss) align 64 %S) nounwind
 	ret void
 ; CHECK: @test3()
 ; CHECK: %S1 = alloca %struct.ss, align 64
 ; CHECK: %S = alloca %struct.ss
 ; CHECK: call void @llvm.memcpy
-; CHECK: call void @g3(%struct.ss* %S1)
+; CHECK: call void @g3(ptr %S1)
 ; CHECK: ret void
 }
 
@@ -95,15 +95,15 @@ entry:
 ; into an alloca if the function is readonly, but should increase an alloca's
 ; alignment to satisfy an explicit alignment request.
 
-define internal i32 @f4(%struct.ss* byval(%struct.ss) align 64 %b) nounwind readonly {
-        call void @g3(%struct.ss* %b)
+define internal i32 @f4(ptr byval(%struct.ss) align 64 %b) nounwind readonly {
+        call void @g3(ptr %b)
 	ret i32 4
 }
 
 define i32 @test4() nounwind  {
 entry:
-	%S = alloca %struct.ss, align 2		; <%struct.ss*> [#uses=4]
-	%X = call i32 @f4(%struct.ss* byval(%struct.ss) align 64 %S) nounwind
+	%S = alloca %struct.ss, align 2		; <ptr> [#uses=4]
+	%X = call i32 @f4(ptr byval(%struct.ss) align 64 %S) nounwind
 	ret i32 %X
 ; CHECK: @test4()
 ; CHECK: %S = alloca %struct.ss, align 64
@@ -117,23 +117,22 @@ entry:
 @b = global %struct.S0 { i32 1 }, align 4
 @a = common global i32 0, align 4
 
-define internal void @f5(%struct.S0* byval(%struct.S0) nocapture readonly align 4 %p) {
+define internal void @f5(ptr byval(%struct.S0) nocapture readonly align 4 %p) {
 entry:
-	store i32 0, i32* getelementptr inbounds (%struct.S0, %struct.S0* @b, i64 0, i32 0), align 4
-	%f2 = getelementptr inbounds %struct.S0, %struct.S0* %p, i64 0, i32 0
-	%0 = load i32, i32* %f2, align 4
-	store i32 %0, i32* @a, align 4
+	store i32 0, ptr @b, align 4
+	%0 = load i32, ptr %p, align 4
+	store i32 %0, ptr @a, align 4
 	ret void
 }
 
 define i32 @test5() {
 entry:
-	tail call void @f5(%struct.S0* byval(%struct.S0) align 4 @b)
-	%0 = load i32, i32* @a, align 4
+	tail call void @f5(ptr byval(%struct.S0) align 4 @b)
+	%0 = load i32, ptr @a, align 4
 	ret i32 %0
 ; CHECK: @test5()
-; CHECK: store i32 0, i32* getelementptr inbounds (%struct.S0, %struct.S0* @b, i64 0, i32 0), align 4
-; CHECK-NOT: load i32, i32* getelementptr inbounds (%struct.S0, %struct.S0* @b, i64 0, i32 0), align 4
+; CHECK: store i32 0, ptr @b, align 4
+; CHECK-NOT: load i32, ptr @b, align 4
 }
 
 ; Inlining a byval struct that is in a 
diff erent address space compared to the
@@ -146,19 +145,18 @@ entry:
 @d = addrspace(1) global %struct.S1 { i32 1 }, align 4
 @c = common addrspace(1) global i32 0, align 4
 
-define internal void @f5_as1(%struct.S1 addrspace(1)* byval(%struct.S1) nocapture readonly align 4 %p) {
+define internal void @f5_as1(ptr addrspace(1) byval(%struct.S1) nocapture readonly align 4 %p) {
 entry:
-	store i32 0, i32 addrspace(1)* getelementptr inbounds (%struct.S1, %struct.S1 addrspace(1)* @d, i64 0, i32 0), align 4
-	%f2 = getelementptr inbounds %struct.S1, %struct.S1 addrspace(1)* %p, i64 0, i32 0
-	%0 = load i32, i32 addrspace(1)* %f2, align 4
-	store i32 %0, i32 addrspace(1)* @c, align 4
+	store i32 0, ptr addrspace(1) @d, align 4
+	%0 = load i32, ptr addrspace(1) %p, align 4
+	store i32 %0, ptr addrspace(1) @c, align 4
 	ret void
 }
 
 define i32 @test5_as1() {
 entry:
-	tail call void @f5_as1(%struct.S1 addrspace(1)* byval(%struct.S1) align 4 @d)
-	%0 = load i32, i32 addrspace(1)* @c, align 4
+	tail call void @f5_as1(ptr addrspace(1) byval(%struct.S1) align 4 @d)
+	%0 = load i32, ptr addrspace(1) @c, align 4
 	ret i32 %0
 ; CHECK: @test5_as1()
 ; CHECK: call void @f5_as1

diff  --git a/llvm/test/Transforms/Inline/byval_lifetime.ll b/llvm/test/Transforms/Inline/byval_lifetime.ll
index c6f7f82c03d8e..9f68cd49c4b4c 100644
--- a/llvm/test/Transforms/Inline/byval_lifetime.ll
+++ b/llvm/test/Transforms/Inline/byval_lifetime.ll
@@ -8,19 +8,19 @@
 
 @gFoo = global %struct.foo zeroinitializer, align 8
 
-define i32 @foo(%struct.foo* byval(%struct.foo) align 8 %f, i32 %a) {
+define i32 @foo(ptr byval(%struct.foo) align 8 %f, i32 %a) {
 entry:
-  %a1 = getelementptr inbounds %struct.foo, %struct.foo* %f, i32 0, i32 1
-  %arrayidx = getelementptr inbounds [16 x i32], [16 x i32]* %a1, i32 0, i32 %a
-  %tmp2 = load i32, i32* %arrayidx, align 1
+  %a1 = getelementptr inbounds %struct.foo, ptr %f, i32 0, i32 1
+  %arrayidx = getelementptr inbounds [16 x i32], ptr %a1, i32 0, i32 %a
+  %tmp2 = load i32, ptr %arrayidx, align 1
   ret i32 %tmp2
 }
 
-define i32 @main(i32 %argc, i8** %argv) {
+define i32 @main(i32 %argc, ptr %argv) {
 ; CHECK-LABEL: @main
 ; CHECK: llvm.lifetime.start
 ; CHECK: memcpy
 entry:
-  %call = call i32 @foo(%struct.foo* byval(%struct.foo) align 8 @gFoo, i32 %argc)
+  %call = call i32 @foo(ptr byval(%struct.foo) align 8 @gFoo, i32 %argc)
   ret i32 %call
 }

diff  --git a/llvm/test/Transforms/Inline/callgraph-update.ll b/llvm/test/Transforms/Inline/callgraph-update.ll
index 94a2b2c59e92d..a3111b2c72edf 100644
--- a/llvm/test/Transforms/Inline/callgraph-update.ll
+++ b/llvm/test/Transforms/Inline/callgraph-update.ll
@@ -21,7 +21,7 @@ define internal fastcc void @parse() {
 	ret void
 }
 
-define void @main() personality i32 (...)* @__gxx_personality_v0 {
+define void @main() personality ptr @__gxx_personality_v0 {
 	invoke fastcc void @parse()
 			to label %invcont unwind label %lpad
 
@@ -29,7 +29,7 @@ invcont:
 	unreachable
 
 lpad:
-        %exn = landingpad {i8*, i32}
+        %exn = landingpad {ptr, i32}
                  cleanup
 	unreachable
 }

diff  --git a/llvm/test/Transforms/Inline/cgscc-cycle.ll b/llvm/test/Transforms/Inline/cgscc-cycle.ll
index 763f5f0cfe792..fc9823c067ae2 100644
--- a/llvm/test/Transforms/Inline/cgscc-cycle.ll
+++ b/llvm/test/Transforms/Inline/cgscc-cycle.ll
@@ -10,115 +10,109 @@
 
 ; The `test1_*` collection of functions form a directly cycling pattern.
 
-define void @test1_a(i8** %ptr) {
+define void @test1_a(ptr %ptr) {
 ; CHECK-LABEL: define void @test1_a(
 entry:
-  call void @test1_b(i8* bitcast (void (i8*, i1, i32)* @test1_b to i8*), i1 false, i32 0)
+  call void @test1_b(ptr @test1_b, i1 false, i32 0)
 ; Inlining and simplifying this call will reliably produce the exact same call,
 ; over and over again. However, each inlining increments the count, and so we
 ; expect this test case to stop after one round of inlining with a final
 ; argument of '1'.
 ; CHECK-NOT:     call
-; CHECK:         call void @test1_b(i8* nonnull bitcast (void (i8*, i1, i32)* @test1_b to i8*), i1 false, i32 1)
+; CHECK:         call void @test1_b(ptr nonnull @test1_b, i1 false, i32 1)
 ; CHECK-NOT:     call
 
   ret void
 }
 
-define void @test1_b(i8* %arg, i1 %flag, i32 %inline_count) {
+define void @test1_b(ptr %arg, i1 %flag, i32 %inline_count) {
 ; CHECK-LABEL: define void @test1_b(
 entry:
-  %a = alloca i8*
-  store i8* %arg, i8** %a
+  %a = alloca ptr
+  store ptr %arg, ptr %a
 ; This alloca and store should remain through any optimization.
 ; CHECK:         %[[A:.*]] = alloca
-; CHECK:         store i8* %arg, i8** %[[A]]
+; CHECK:         store ptr %arg, ptr %[[A]]
 
   br i1 %flag, label %bb1, label %bb2
 
 bb1:
-  call void @test1_a(i8** %a) noinline
+  call void @test1_a(ptr %a) noinline
   br label %bb2
 
 bb2:
-  %cast = bitcast i8** %a to void (i8*, i1, i32)**
-  %p = load void (i8*, i1, i32)*, void (i8*, i1, i32)** %cast
+  %p = load ptr, ptr %a
   %inline_count_inc = add i32 %inline_count, 1
-  call void %p(i8* %arg, i1 %flag, i32 %inline_count_inc)
+  call void %p(ptr %arg, i1 %flag, i32 %inline_count_inc)
 ; And we should continue to load and call indirectly through optimization.
-; CHECK:         %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i1, i32)**
-; CHECK:         %[[P:.*]] = load void (i8*, i1, i32)*, void (i8*, i1, i32)** %[[CAST]]
+; CHECK:         %[[P:.*]] = load ptr, ptr %[[A]]
 ; CHECK:         call void %[[P]](
 
   ret void
 }
 
-define void @test2_a(i8** %ptr) {
+define void @test2_a(ptr %ptr) {
 ; CHECK-LABEL: define void @test2_a(
 entry:
-  call void @test2_b(i8* bitcast (void (i8*, i8*, i1, i32)* @test2_b to i8*), i8* bitcast (void (i8*, i8*, i1, i32)* @test2_c to i8*), i1 false, i32 0)
+  call void @test2_b(ptr @test2_b, ptr @test2_c, i1 false, i32 0)
 ; Inlining and simplifying this call will reliably produce the exact same call,
 ; but only after doing two rounds if inlining, first from @test2_b then
 ; @test2_c. We check the exact number of inlining rounds before we cut off to
 ; break the cycle by inspecting the last paramater that gets incremented with
 ; each inlined function body.
 ; CHECK-NOT:     call
-; CHECK:         call void @test2_b(i8* nonnull bitcast (void (i8*, i8*, i1, i32)* @test2_b to i8*), i8* nonnull bitcast (void (i8*, i8*, i1, i32)* @test2_c to i8*), i1 false, i32 2)
+; CHECK:         call void @test2_b(ptr nonnull @test2_b, ptr nonnull @test2_c, i1 false, i32 2)
 ; CHECK-NOT:     call
   ret void
 }
 
-define void @test2_b(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count) {
+define void @test2_b(ptr %arg1, ptr %arg2, i1 %flag, i32 %inline_count) {
 ; CHECK-LABEL: define void @test2_b(
 entry:
-  %a = alloca i8*
-  store i8* %arg2, i8** %a
+  %a = alloca ptr
+  store ptr %arg2, ptr %a
 ; This alloca and store should remain through any optimization.
 ; CHECK:         %[[A:.*]] = alloca
-; CHECK:         store i8* %arg2, i8** %[[A]]
+; CHECK:         store ptr %arg2, ptr %[[A]]
 
   br i1 %flag, label %bb1, label %bb2
 
 bb1:
-  call void @test2_a(i8** %a) noinline
+  call void @test2_a(ptr %a) noinline
   br label %bb2
 
 bb2:
-  %p = load i8*, i8** %a
-  %cast = bitcast i8* %p to void (i8*, i8*, i1, i32)*
+  %p = load ptr, ptr %a
   %inline_count_inc = add i32 %inline_count, 1
-  call void %cast(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count_inc)
+  call void %p(ptr %arg1, ptr %arg2, i1 %flag, i32 %inline_count_inc)
 ; And we should continue to load and call indirectly through optimization.
-; CHECK:         %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i8*, i1, i32)**
-; CHECK:         %[[P:.*]] = load void (i8*, i8*, i1, i32)*, void (i8*, i8*, i1, i32)** %[[CAST]]
+; CHECK:         %[[P:.*]] = load ptr, ptr %[[A]]
 ; CHECK:         call void %[[P]](
 
   ret void
 }
 
-define void @test2_c(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count) {
+define void @test2_c(ptr %arg1, ptr %arg2, i1 %flag, i32 %inline_count) {
 ; CHECK-LABEL: define void @test2_c(
 entry:
-  %a = alloca i8*
-  store i8* %arg1, i8** %a
+  %a = alloca ptr
+  store ptr %arg1, ptr %a
 ; This alloca and store should remain through any optimization.
 ; CHECK:         %[[A:.*]] = alloca
-; CHECK:         store i8* %arg1, i8** %[[A]]
+; CHECK:         store ptr %arg1, ptr %[[A]]
 
   br i1 %flag, label %bb1, label %bb2
 
 bb1:
-  call void @test2_a(i8** %a) noinline
+  call void @test2_a(ptr %a) noinline
   br label %bb2
 
 bb2:
-  %p = load i8*, i8** %a
-  %cast = bitcast i8* %p to void (i8*, i8*, i1, i32)*
+  %p = load ptr, ptr %a
   %inline_count_inc = add i32 %inline_count, 1
-  call void %cast(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count_inc)
+  call void %p(ptr %arg1, ptr %arg2, i1 %flag, i32 %inline_count_inc)
 ; And we should continue to load and call indirectly through optimization.
-; CHECK:         %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i8*, i1, i32)**
-; CHECK:         %[[P:.*]] = load void (i8*, i8*, i1, i32)*, void (i8*, i8*, i1, i32)** %[[CAST]]
+; CHECK:         %[[P:.*]] = load ptr, ptr %[[A]]
 ; CHECK:         call void %[[P]](
 
   ret void
@@ -157,12 +151,12 @@ bb2:
 ; CHECK-LABEL: @test3_a(
 ; CHECK: tail call void @test3_b()
 ; CHECK-NEXT: tail call void @test3_d(i32 5)
-; CHECK-NEXT: %[[LD1:.*]] = load i64, i64* @a
+; CHECK-NEXT: %[[LD1:.*]] = load i64, ptr @a
 ; CHECK-NEXT: %[[ADD1:.*]] = add nsw i64 %[[LD1]], 1
-; CHECK-NEXT: store i64 %[[ADD1]], i64* @a
-; CHECK-NEXT: %[[LD2:.*]] = load i64, i64* @b
+; CHECK-NEXT: store i64 %[[ADD1]], ptr @a
+; CHECK-NEXT: %[[LD2:.*]] = load i64, ptr @b
 ; CHECK-NEXT: %[[ADD2:.*]] = add nsw i64 %[[LD2]], 5
-; CHECK-NEXT: store i64 %[[ADD2]], i64* @b
+; CHECK-NEXT: store i64 %[[ADD2]], ptr @b
 ; CHECK-NEXT: ret void
 
 ; Function Attrs: noinline
@@ -170,9 +164,9 @@ define void @test3_a() #0 {
 entry:
   tail call void @test3_b()
   tail call void @test3_c(i32 5)
-  %t0 = load i64, i64* @b
+  %t0 = load i64, ptr @b
   %add = add nsw i64 %t0, 5
-  store i64 %add, i64* @b
+  store i64 %add, ptr @b
   ret void
 }
 
@@ -180,9 +174,9 @@ entry:
 define void @test3_b() #0 {
 entry:
   tail call void @test3_a()
-  %t0 = load i64, i64* @a
+  %t0 = load i64, ptr @a
   %add = add nsw i64 %t0, 2
-  store i64 %add, i64* @a
+  store i64 %add, ptr @a
   ret void
 }
 
@@ -193,17 +187,17 @@ entry:
 
 if.then:                                          ; preds = %entry
   %call = tail call i64 @random()
-  %t0 = load i64, i64* @a
+  %t0 = load i64, ptr @a
   %add = add nsw i64 %t0, %call
-  store i64 %add, i64* @a
+  store i64 %add, ptr @a
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then
   tail call void @test3_c(i32 %i)
   tail call void @test3_b()
-  %t6 = load i64, i64* @a
+  %t6 = load i64, ptr @a
   %add79 = add nsw i64 %t6, 3
-  store i64 %add79, i64* @a
+  store i64 %add79, ptr @a
   ret void
 }
 
@@ -214,16 +208,16 @@ entry:
 
 if.then:                                          ; preds = %entry
   %call = tail call i64 @random()
-  %t0 = load i64, i64* @a
+  %t0 = load i64, ptr @a
   %add = add nsw i64 %t0, %call
-  store i64 %add, i64* @a
+  store i64 %add, ptr @a
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then
   tail call void @test3_d(i32 %i)
-  %t6 = load i64, i64* @a
+  %t6 = load i64, ptr @a
   %add85 = add nsw i64 %t6, 1
-  store i64 %add85, i64* @a
+  store i64 %add85, ptr @a
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/cgscc-incremental-invalidate.ll b/llvm/test/Transforms/Inline/cgscc-incremental-invalidate.ll
index 69f722d77abe3..7443581cf26b1 100644
--- a/llvm/test/Transforms/Inline/cgscc-incremental-invalidate.ll
+++ b/llvm/test/Transforms/Inline/cgscc-incremental-invalidate.ll
@@ -43,11 +43,11 @@ entry:
   br i1 %flag, label %then, label %else
 
 then:
-  store volatile i8 42, i8* %ptr
+  store volatile i8 42, ptr %ptr
   br label %return
 
 else:
-  store volatile i8 -42, i8* %ptr
+  store volatile i8 -42, ptr %ptr
   br label %return
 
 return:
@@ -125,15 +125,15 @@ entry:
 
 @test2_global = external global i32, align 4
 
-define void @test2_hoge(i1 (i32*)* %arg) {
+define void @test2_hoge(ptr %arg) {
 ; CHECK-LABEL: define void @test2_hoge(
 bb:
-  %tmp2 = call zeroext i1 %arg(i32* @test2_global)
+  %tmp2 = call zeroext i1 %arg(ptr @test2_global)
 ; CHECK: call zeroext i1 %arg(
   br label %bb3
 
 bb3:
-  %tmp5 = call zeroext i1 %arg(i32* @test2_global)
+  %tmp5 = call zeroext i1 %arg(ptr @test2_global)
 ; CHECK: call zeroext i1 %arg(
   br i1 %tmp5, label %bb3, label %bb6
 
@@ -141,7 +141,7 @@ bb6:
   ret void
 }
 
-define zeroext i1 @test2_widget(i32* %arg) {
+define zeroext i1 @test2_widget(ptr %arg) {
 ; CHECK-LABEL: define zeroext i1 @test2_widget(
 bb:
   %tmp1 = alloca i8, align 1
@@ -149,32 +149,32 @@ bb:
   call void @test2_quux()
 ; CHECK-NOT:     call
 ;
-; CHECK:         call zeroext i1 @test2_widget(i32* @test2_global)
+; CHECK:         call zeroext i1 @test2_widget(ptr @test2_global)
 ; CHECK-NEXT:    br label %[[NEW_BB:.*]]
 ;
 ; CHECK:       [[NEW_BB]]:
-; CHECK-NEXT:    call zeroext i1 @test2_widget(i32* @test2_global)
+; CHECK-NEXT:    call zeroext i1 @test2_widget(ptr @test2_global)
 ;
 ; CHECK:       {{.*}}:
 
-  call void @test2_hoge.1(i32* %arg)
+  call void @test2_hoge.1(ptr %arg)
 ; CHECK-NEXT:    call void @test2_hoge.1(
 
-  %tmp4 = call zeroext i1 @test2_barney(i32* %tmp2)
+  %tmp4 = call zeroext i1 @test2_barney(ptr %tmp2)
   %tmp5 = zext i1 %tmp4 to i32
-  store i32 %tmp5, i32* %tmp2, align 4
-  %tmp6 = call zeroext i1 @test2_barney(i32* null)
-  call void @test2_ham(i8* %tmp1)
+  store i32 %tmp5, ptr %tmp2, align 4
+  %tmp6 = call zeroext i1 @test2_barney(ptr null)
+  call void @test2_ham(ptr %tmp1)
 ; CHECK:         call void @test2_ham(
 
   call void @test2_quux()
 ; CHECK-NOT:     call
 ;
-; CHECK:         call zeroext i1 @test2_widget(i32* @test2_global)
+; CHECK:         call zeroext i1 @test2_widget(ptr @test2_global)
 ; CHECK-NEXT:    br label %[[NEW_BB:.*]]
 ;
 ; CHECK:       [[NEW_BB]]:
-; CHECK-NEXT:    call zeroext i1 @test2_widget(i32* @test2_global)
+; CHECK-NEXT:    call zeroext i1 @test2_widget(ptr @test2_global)
 ;
 ; CHECK:       {{.*}}:
   ret i1 true
@@ -184,12 +184,12 @@ bb:
 define internal void @test2_quux() {
 ; CHECK-NOT: @test2_quux
 bb:
-  call void @test2_hoge(i1 (i32*)* @test2_widget)
+  call void @test2_hoge(ptr @test2_widget)
   ret void
 }
 
-declare void @test2_hoge.1(i32*)
+declare void @test2_hoge.1(ptr)
 
-declare zeroext i1 @test2_barney(i32*)
+declare zeroext i1 @test2_barney(ptr)
 
-declare void @test2_ham(i8*)
+declare void @test2_ham(ptr)

diff  --git a/llvm/test/Transforms/Inline/cgscc-inline-replay.ll b/llvm/test/Transforms/Inline/cgscc-inline-replay.ll
index daf31be5ad516..eae1437ef0e33 100644
--- a/llvm/test/Transforms/Inline/cgscc-inline-replay.ll
+++ b/llvm/test/Transforms/Inline/cgscc-inline-replay.ll
@@ -70,13 +70,13 @@ define i32 @_Z3sumii(i32 %x, i32 %y) #0 !dbg !6 {
 entry:
   %x.addr = alloca i32, align 4
   %y.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  store i32 %y, i32* %y.addr, align 4
-  %tmp = load i32, i32* %x.addr, align 4, !dbg !8
-  %tmp1 = load i32, i32* %y.addr, align 4, !dbg !8
+  store i32 %x, ptr %x.addr, align 4
+  store i32 %y, ptr %y.addr, align 4
+  %tmp = load i32, ptr %x.addr, align 4, !dbg !8
+  %tmp1 = load i32, ptr %y.addr, align 4, !dbg !8
   %add = add nsw i32 %tmp, %tmp1, !dbg !8
-  %tmp2 = load i32, i32* %x.addr, align 4, !dbg !8
-  %tmp3 = load i32, i32* %y.addr, align 4, !dbg !8
+  %tmp2 = load i32, ptr %x.addr, align 4, !dbg !8
+  %tmp3 = load i32, ptr %y.addr, align 4, !dbg !8
   %call = call i32 @_Z3subii(i32 %tmp2, i32 %tmp3), !dbg !8
   ret i32 %add, !dbg !8
 }
@@ -85,10 +85,10 @@ define i32 @_Z3subii(i32 %x, i32 %y) #0 !dbg !9 {
 entry:
   %x.addr = alloca i32, align 4
   %y.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  store i32 %y, i32* %y.addr, align 4
-  %tmp = load i32, i32* %x.addr, align 4, !dbg !10
-  %tmp1 = load i32, i32* %y.addr, align 4, !dbg !10
+  store i32 %x, ptr %x.addr, align 4
+  store i32 %y, ptr %y.addr, align 4
+  %tmp = load i32, ptr %x.addr, align 4, !dbg !10
+  %tmp1 = load i32, ptr %y.addr, align 4, !dbg !10
   %add = sub nsw i32 %tmp, %tmp1, !dbg !10
   ret i32 %add, !dbg !11
 }
@@ -98,43 +98,43 @@ entry:
   %retval = alloca i32, align 4
   %s = alloca i32, align 4
   %i = alloca i32, align 4
-  store i32 0, i32* %retval
-  store i32 0, i32* %i, align 4, !dbg !13
+  store i32 0, ptr %retval
+  store i32 0, ptr %i, align 4, !dbg !13
   br label %while.cond, !dbg !14
 
 while.cond:                                       ; preds = %if.end, %entry
-  %tmp = load i32, i32* %i, align 4, !dbg !15
+  %tmp = load i32, ptr %i, align 4, !dbg !15
   %inc = add nsw i32 %tmp, 1, !dbg !15
-  store i32 %inc, i32* %i, align 4, !dbg !15
+  store i32 %inc, ptr %i, align 4, !dbg !15
   %cmp = icmp slt i32 %tmp, 400000000, !dbg !15
   br i1 %cmp, label %while.body, label %while.end, !dbg !15
 
 while.body:                                       ; preds = %while.cond
-  %tmp1 = load i32, i32* %i, align 4, !dbg !17
+  %tmp1 = load i32, ptr %i, align 4, !dbg !17
   %cmp1 = icmp ne i32 %tmp1, 100, !dbg !17
   br i1 %cmp1, label %if.then, label %if.else, !dbg !17
 
 if.then:                                          ; preds = %while.body
-  %tmp2 = load i32, i32* %i, align 4, !dbg !19
-  %tmp3 = load i32, i32* %s, align 4, !dbg !19
+  %tmp2 = load i32, ptr %i, align 4, !dbg !19
+  %tmp3 = load i32, ptr %s, align 4, !dbg !19
   %call = call i32 @_Z3sumii(i32 %tmp2, i32 %tmp3), !dbg !19
-  store i32 %call, i32* %s, align 4, !dbg !19
+  store i32 %call, ptr %s, align 4, !dbg !19
   br label %if.end, !dbg !19
 
 if.else:                                          ; preds = %while.body
-  store i32 30, i32* %s, align 4, !dbg !21
+  store i32 30, ptr %s, align 4, !dbg !21
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
   br label %while.cond, !dbg !23
 
 while.end:                                        ; preds = %while.cond
-  %tmp4 = load i32, i32* %s, align 4, !dbg !25
-  %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i32 %tmp4), !dbg !25
+  %tmp4 = load i32, ptr %s, align 4, !dbg !25
+  %call2 = call i32 (ptr, ...) @printf(ptr @.str, i32 %tmp4), !dbg !25
   ret i32 0, !dbg !26
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
 
 attributes #0 = { "use-sample-profile" }
 

diff  --git a/llvm/test/Transforms/Inline/cgscc-invalidate.ll b/llvm/test/Transforms/Inline/cgscc-invalidate.ll
index 725a28e2b1632..b5c8792eb7807 100644
--- a/llvm/test/Transforms/Inline/cgscc-invalidate.ll
+++ b/llvm/test/Transforms/Inline/cgscc-invalidate.ll
@@ -24,11 +24,11 @@ entry:
   br i1 %flag, label %then, label %else
 
 then:
-  store volatile i8 42, i8* %ptr
+  store volatile i8 42, ptr %ptr
   br label %return
 
 else:
-  store volatile i8 -42, i8* %ptr
+  store volatile i8 -42, ptr %ptr
   br label %return
 
 return:

diff  --git a/llvm/test/Transforms/Inline/cgscc-update.ll b/llvm/test/Transforms/Inline/cgscc-update.ll
index b5c30360548c1..f1121fa88a4b1 100644
--- a/llvm/test/Transforms/Inline/cgscc-update.ll
+++ b/llvm/test/Transforms/Inline/cgscc-update.ll
@@ -20,7 +20,7 @@ declare void @readnone() readnone nounwind
 
 ; This function should no longer exist.
 ; CHECK-NOT: @test1_f()
-define internal void @test1_f(void()* %p) {
+define internal void @test1_f(ptr %p) {
 entry:
   call void %p()
   ret void
@@ -31,7 +31,7 @@ entry:
 ; CHECK-NEXT: define void @test1_g()
 define void @test1_g() noinline {
 entry:
-  call void @test1_f(void()* @test1_h)
+  call void @test1_f(ptr @test1_h)
   ret void
 }
 
@@ -53,9 +53,9 @@ entry:
 
 ; This function should no longer exist.
 ; CHECK-NOT: @test2_f()
-define internal void()* @test2_f() {
+define internal ptr @test2_f() {
 entry:
-  ret void()* @test2_h
+  ret ptr @test2_h
 }
 
 ; This function should have had 'memory(none)' deduced for its SCC.
@@ -63,7 +63,7 @@ entry:
 ; CHECK-NEXT: define void @test2_g()
 define void @test2_g() noinline {
 entry:
-  %p = call void()* @test2_f()
+  %p = call ptr @test2_f()
   call void %p()
   ret void
 }
@@ -168,7 +168,7 @@ entry:
 }
 
 ; CHECK-NOT: @test4_g
-define internal void @test4_g(void()* %p) {
+define internal void @test4_g(ptr %p) {
 entry:
   call void %p()
   ret void
@@ -179,6 +179,6 @@ entry:
 ; CHECK-NEXT: define void @test4_h()
 define void @test4_h() noinline {
 entry:
-  call void @test4_g(void()* @test4_f2)
+  call void @test4_g(ptr @test4_f2)
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/crash-lifetime-marker.ll b/llvm/test/Transforms/Inline/crash-lifetime-marker.ll
index daa24029f0141..1c79b10d413bd 100644
--- a/llvm/test/Transforms/Inline/crash-lifetime-marker.ll
+++ b/llvm/test/Transforms/Inline/crash-lifetime-marker.ll
@@ -6,19 +6,19 @@
 ; there was an zero-sized AllocaInst. Check that it doesn't assert and doesn't
 ; leave lifetime markers in that case.
 
-declare i32 @callee2(i8*)
+declare i32 @callee2(ptr)
 
 define i32 @callee1(i32 %count) {
   %a0 = alloca i8, i32 %count, align 4
-  %call0 = call i32 @callee2(i8* %a0)
+  %call0 = call i32 @callee2(ptr %a0)
   ret i32 %call0
 }
 
 ; CHECK-LABEL: define i32 @caller1(
 ; CHECK: [[ALLOCA:%[a-z0-9\.]+]] = alloca i8
-; CHECK-NOT: call void @llvm.lifetime.start.p0i8(
-; CHECK: call i32 @callee2(i8* [[ALLOCA]])
-; CHECK-NOT: call void @llvm.lifetime.end.p0i8(
+; CHECK-NOT: call void @llvm.lifetime.start.p0(
+; CHECK: call i32 @callee2(ptr [[ALLOCA]])
+; CHECK-NOT: call void @llvm.lifetime.end.p0(
 
 define i32 @caller1(i32 %count) {
   %call0 = call i32 @callee1(i32 0)

diff  --git a/llvm/test/Transforms/Inline/crash.ll b/llvm/test/Transforms/Inline/crash.ll
index f8cab83f371b8..6254d905305fe 100644
--- a/llvm/test/Transforms/Inline/crash.ll
+++ b/llvm/test/Transforms/Inline/crash.ll
@@ -11,43 +11,43 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
 target triple = "i386-apple-darwin10.0"
 
 
-define void @list_DeleteElement(i32 (i8*, i8*)* nocapture %Test) nounwind ssp {
+define void @list_DeleteElement(ptr nocapture %Test) nounwind ssp {
 entry:
-  %0 = call i32 %Test(i8* null, i8* undef) nounwind
+  %0 = call i32 %Test(ptr null, ptr undef) nounwind
   ret void
 }
 
 
-define void @list_DeleteDuplicates(i32 (i8*, i8*)* nocapture %Test) nounwind ssp {
+define void @list_DeleteDuplicates(ptr nocapture %Test) nounwind ssp {
 foo:
-  call void @list_DeleteElement(i32 (i8*, i8*)* %Test) nounwind ssp 
+  call void @list_DeleteElement(ptr %Test) nounwind ssp 
   call fastcc void @list_Rplacd1284() nounwind ssp
   unreachable
 
 }
 
-define internal i32 @inf_LiteralsHaveSameSubtermAndAreFromSameClause(i32* nocapture %L1, i32* nocapture %L2) nounwind readonly ssp {
+define internal i32 @inf_LiteralsHaveSameSubtermAndAreFromSameClause(ptr nocapture %L1, ptr nocapture %L2) nounwind readonly ssp {
 entry:
   unreachable
 }
 
 
-define internal fastcc void @inf_GetBackwardPartnerLits(i32* nocapture %Flags) nounwind ssp {
+define internal fastcc void @inf_GetBackwardPartnerLits(ptr nocapture %Flags) nounwind ssp {
 test:
-  call void @list_DeleteDuplicates(i32 (i8*, i8*)* bitcast (i32 (i32*, i32*)* @inf_LiteralsHaveSameSubtermAndAreFromSameClause to i32 (i8*, i8*)*)) nounwind 
+  call void @list_DeleteDuplicates(ptr @inf_LiteralsHaveSameSubtermAndAreFromSameClause) nounwind 
   ret void
 }
 
 
 define void @inf_BackwardEmptySortPlusPlus() nounwind ssp {
 entry:
-  call fastcc void @inf_GetBackwardPartnerLits(i32* null) nounwind ssp
+  call fastcc void @inf_GetBackwardPartnerLits(ptr null) nounwind ssp
   unreachable
 }
 
 define void @inf_BackwardWeakening() nounwind ssp {
 entry:
-  call fastcc void @inf_GetBackwardPartnerLits(i32* null) nounwind ssp
+  call fastcc void @inf_GetBackwardPartnerLits(ptr null) nounwind ssp
   unreachable
 }
 
@@ -59,7 +59,7 @@ declare fastcc void @list_Rplacd1284() nounwind ssp
 ;============================
 ; PR5208
 
-define void @AAA() personality i32 (...)* @__gxx_personality_v0 {
+define void @AAA() personality ptr @__gxx_personality_v0 {
 entry:
   %A = alloca i8, i32 undef, align 1
   invoke fastcc void @XXX()
@@ -69,7 +69,7 @@ invcont98:
   unreachable
 
 lpad156:                            
-  %exn = landingpad {i8*, i32}
+  %exn = landingpad {ptr, i32}
             cleanup
   unreachable
 }
@@ -78,7 +78,7 @@ declare i32 @__gxx_personality_v0(...)
 
 declare fastcc void @YYY()
 
-define internal fastcc void @XXX() personality i32 (...)* @__gxx_personality_v0 {
+define internal fastcc void @XXX() personality ptr @__gxx_personality_v0 {
 entry:
   %B = alloca i8, i32 undef, align 1
   invoke fastcc void @YYY()
@@ -88,30 +88,30 @@ bb260:
   ret void
 
 lpad:                               
-  %exn = landingpad {i8*, i32}
+  %exn = landingpad {ptr, i32}
             cleanup
-  resume { i8*, i32 } %exn
+  resume { ptr, i32 } %exn
 }
 
 
 
 ;; This exposed a crash handling devirtualized calls.
-define void @f1(void ()* %f) ssp {
+define void @f1(ptr %f) ssp {
 entry:
   call void %f()
   ret void
 }
 
-define void @f4(i32 %size) ssp personality i32 (...)* @__gxx_personality_v0 {
+define void @f4(i32 %size) ssp personality ptr @__gxx_personality_v0 {
 entry:
-  invoke void @f1(void ()* @f3)
+  invoke void @f1(ptr @f3)
           to label %invcont3 unwind label %lpad18
 
 invcont3:                                         ; preds = %bb1
   ret void
 
 lpad18:                                           ; preds = %invcont3, %bb1
-  %exn = landingpad {i8*, i32}
+  %exn = landingpad {ptr, i32}
             cleanup
   unreachable
 }

diff  --git a/llvm/test/Transforms/Inline/crash2.ll b/llvm/test/Transforms/Inline/crash2.ll
index 7c4eda91b1922..9ddc4d3f81081 100644
--- a/llvm/test/Transforms/Inline/crash2.ll
+++ b/llvm/test/Transforms/Inline/crash2.ll
@@ -2,28 +2,28 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-darwin10.3"
 
-declare i8* @f1(i8*) ssp align 2
+declare ptr @f1(ptr) ssp align 2
 
-define linkonce_odr void @f2(i8* %t) inlinehint ssp {
+define linkonce_odr void @f2(ptr %t) inlinehint ssp {
 entry:
   unreachable
 }
 
-define linkonce_odr void @f3(void (i8*)* %__f) ssp {
+define linkonce_odr void @f3(ptr %__f) ssp {
 entry:
-  %__f_addr = alloca void (i8*)*, align 8
-  store void (i8*)* %__f, void (i8*)** %__f_addr
+  %__f_addr = alloca ptr, align 8
+  store ptr %__f, ptr %__f_addr
 
-  %0 = load void (i8*)*, void (i8*)** %__f_addr, align 8
-  call void %0(i8* undef)
-  call i8* @f1(i8* undef) ssp
+  %0 = load ptr, ptr %__f_addr, align 8
+  call void %0(ptr undef)
+  call ptr @f1(ptr undef) ssp
   unreachable
 }
 
-define linkonce_odr void @f4(i8* %this) ssp align 2 {
+define linkonce_odr void @f4(ptr %this) ssp align 2 {
 entry:
   %0 = alloca i32
-  call void @f3(void (i8*)* @f2) ssp
+  call void @f3(ptr @f2) ssp
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/debug-invoke.ll b/llvm/test/Transforms/Inline/debug-invoke.ll
index 9867bb799ce7b..3cf1ca36e774a 100644
--- a/llvm/test/Transforms/Inline/debug-invoke.ll
+++ b/llvm/test/Transforms/Inline/debug-invoke.ll
@@ -17,7 +17,7 @@ define void @inl() #0 {
   ret void
 }
 
-define void @caller() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @caller() personality ptr @__gxx_personality_v0 {
   invoke void @inl()
     to label %cont unwind label %lpad, !dbg !4
 
@@ -25,7 +25,7 @@ cont:
   ret void
 
 lpad:
-  landingpad { i8*, i32 }
+  landingpad { ptr, i32 }
     cleanup
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/delete-function-with-metadata-use.ll b/llvm/test/Transforms/Inline/delete-function-with-metadata-use.ll
index ae0015f2a131a..8176135a5b538 100644
--- a/llvm/test/Transforms/Inline/delete-function-with-metadata-use.ll
+++ b/llvm/test/Transforms/Inline/delete-function-with-metadata-use.ll
@@ -3,17 +3,17 @@
 ; CHECK: define {{.*}}@f1
 ; CHECK-NOT: define
 
-%a = type { i8*, i8* }
+%a = type { ptr, ptr }
 
 $f3 = comdat any
 
 define linkonce_odr void @f1() {
-  call void @f2(void ()* @f3)
+  call void @f2(ptr @f3)
   ret void
 }
 
-define linkonce_odr void @f2(void ()* %__f) {
-  call void @llvm.dbg.value(metadata void ()* %__f, metadata !2, metadata !DIExpression()), !dbg !10
+define linkonce_odr void @f2(ptr %__f) {
+  call void @llvm.dbg.value(metadata ptr %__f, metadata !2, metadata !DIExpression()), !dbg !10
   call void %__f()
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/deleted-scc.ll b/llvm/test/Transforms/Inline/deleted-scc.ll
index ca96d8657c3ec..7510f045ec6e5 100644
--- a/llvm/test/Transforms/Inline/deleted-scc.ll
+++ b/llvm/test/Transforms/Inline/deleted-scc.ll
@@ -24,13 +24,13 @@ entry:
 
 L:                                                ; preds = %cleanup9, %entry
   %cleanup.dest.slot.0 = phi i32 [ undef, %entry ], [ %cleanup.dest.slot.3, %cleanup9 ]
-  store i32 0, i32* @b, align 4
+  store i32 0, ptr @b, align 4
   %tobool.not = icmp eq i32 0, 0
   br i1 %tobool.not, label %if.then, label %while.cond
 
 while.cond:                                       ; preds = %cleanup9, %L
   %cleanup.dest.slot.2 = phi i32 [ %cleanup.dest.slot.0, %L ], [ 0, %cleanup9 ]
-  %0 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @b, align 4
   %tobool3.not = icmp eq i32 %0, 0
   br i1 %tobool3.not, label %cleanup9, label %while.body4
 

diff  --git a/llvm/test/Transforms/Inline/deoptimize-intrinsic.ll b/llvm/test/Transforms/Inline/deoptimize-intrinsic.ll
index 30c5e48f19a07..1cc327ada7ee3 100644
--- a/llvm/test/Transforms/Inline/deoptimize-intrinsic.ll
+++ b/llvm/test/Transforms/Inline/deoptimize-intrinsic.ll
@@ -3,12 +3,12 @@
 declare i8 @llvm.experimental.deoptimize.i8(...)
 declare i32 @llvm.experimental.deoptimize.i32(...)
 
-define i8 @callee(i1* %c) alwaysinline {
-  %c0 = load volatile i1, i1* %c
+define i8 @callee(ptr %c) alwaysinline {
+  %c0 = load volatile i1, ptr %c
   br i1 %c0, label %left, label %right
 
 left:
-  %c1 = load volatile i1, i1* %c
+  %c1 = load volatile i1, ptr %c
   br i1 %c1, label %lleft, label %lright
 
 lleft:
@@ -19,11 +19,11 @@ lright:
   ret i8 10
 
 right:
-  %c2 = load volatile i1, i1* %c
+  %c2 = load volatile i1, ptr %c
   br i1 %c2, label %rleft, label %rright
 
 rleft:
-  %v1 = call i8(...) @llvm.experimental.deoptimize.i8(i32 1, i32 300, float 500.0, <2 x i32*> undef) [ "deopt"(i32 1) ]
+  %v1 = call i8(...) @llvm.experimental.deoptimize.i8(i32 1, i32 300, float 500.0, <2 x ptr> undef) [ "deopt"(i32 1) ]
   ret i8 %v1
 
 rright:
@@ -31,11 +31,11 @@ rright:
   ret i8 %v2
 }
 
-define void @caller_0(i1* %c, i8* %ptr) {
+define void @caller_0(ptr %c, ptr %ptr) {
 ; CHECK-LABEL: @caller_0(
 entry:
-  %v = call i8 @callee(i1* %c)  [ "deopt"(i32 2) ]
-  store i8 %v, i8* %ptr
+  %v = call i8 @callee(ptr %c)  [ "deopt"(i32 2) ]
+  store i8 %v, ptr %ptr
   ret void
 
 ; CHECK: lleft.i:
@@ -43,7 +43,7 @@ entry:
 ; CHECK-NEXT:  ret void
 
 ; CHECK: rleft.i:
-; CHECK-NEXT:  call void (...) @llvm.experimental.deoptimize.isVoid(i32 1, i32 300, float 5.000000e+02, <2 x i32*> undef) [ "deopt"(i32 2, i32 1) ]
+; CHECK-NEXT:  call void (...) @llvm.experimental.deoptimize.isVoid(i32 1, i32 300, float 5.000000e+02, <2 x ptr> undef) [ "deopt"(i32 2, i32 1) ]
 ; CHECK-NEXT:  ret void
 
 ; CHECK: rright.i:
@@ -51,15 +51,15 @@ entry:
 ; CHECK-NEXT:  ret void
 
 ; CHECK: callee.exit:
-; CHECK-NEXT:  store i8 10, i8* %ptr
+; CHECK-NEXT:  store i8 10, ptr %ptr
 ; CHECK-NEXT:  ret void
 
 }
 
-define i32 @caller_1(i1* %c, i8* %ptr) personality i8 3 {
+define i32 @caller_1(ptr %c, ptr %ptr) personality i8 3 {
 ; CHECK-LABEL: @caller_1(
 entry:
-  %v = invoke i8 @callee(i1* %c)  [ "deopt"(i32 3) ] to label %normal
+  %v = invoke i8 @callee(ptr %c)  [ "deopt"(i32 3) ] to label %normal
        unwind label %unwind
 
 ; CHECK: lleft.i:
@@ -67,7 +67,7 @@ entry:
 ; CHECK-NEXT:  ret i32 %0
 
 ; CHECK: rleft.i:
-; CHECK-NEXT:  %1 = call i32 (...) @llvm.experimental.deoptimize.i32(i32 1, i32 300, float 5.000000e+02, <2 x i32*> undef) [ "deopt"(i32 3, i32 1) ]
+; CHECK-NEXT:  %1 = call i32 (...) @llvm.experimental.deoptimize.i32(i32 1, i32 300, float 5.000000e+02, <2 x ptr> undef) [ "deopt"(i32 3, i32 1) ]
 ; CHECK-NEXT:  ret i32 %1
 
 ; CHECK: rright.i:
@@ -78,7 +78,7 @@ entry:
 ; CHECK-NEXT:  br label %normal
 
 ; CHECK: normal:
-; CHECK-NEXT:  store i8 10, i8* %ptr
+; CHECK-NEXT:  store i8 10, ptr %ptr
 ; CHECK-NEXT:  ret i32 42
 
 unwind:
@@ -86,19 +86,19 @@ unwind:
   ret i32 43
 
 normal:
-  store i8 %v, i8* %ptr
+  store i8 %v, ptr %ptr
   ret i32 42
 }
 
 define i8 @callee_with_alloca() alwaysinline {
   %t = alloca i8
-  %v0 = call i8(...) @llvm.experimental.deoptimize.i8(i32 1) [ "deopt"(i8* %t) ]
+  %v0 = call i8(...) @llvm.experimental.deoptimize.i8(i32 1) [ "deopt"(ptr %t) ]
   ret i8 %v0
 }
 
 define void @caller_with_lifetime() {
 ; CHECK-LABEL: @caller_with_lifetime(
-; CHECK:  call void (...) @llvm.experimental.deoptimize.isVoid(i32 1) [ "deopt"(i8* %t.i) ]
+; CHECK:  call void (...) @llvm.experimental.deoptimize.isVoid(i32 1) [ "deopt"(ptr %t.i) ]
 ; CHECK-NEXT:  ret void
 
 entry:
@@ -108,13 +108,13 @@ entry:
 
 define i8 @callee_with_dynamic_alloca(i32 %n) alwaysinline {
   %p = alloca i8, i32 %n
-  %v = call i8(...) @llvm.experimental.deoptimize.i8(i32 1) [ "deopt"(i8* %p) ]
+  %v = call i8(...) @llvm.experimental.deoptimize.i8(i32 1) [ "deopt"(ptr %p) ]
   ret i8 %v
 }
 
 define void @caller_with_stacksaverestore(i32 %n) {
 ; CHECK-LABEL: void @caller_with_stacksaverestore(
-; CHECK:  call void (...) @llvm.experimental.deoptimize.isVoid(i32 1) [ "deopt"(i8* %p.i) ]
+; CHECK:  call void (...) @llvm.experimental.deoptimize.isVoid(i32 1) [ "deopt"(ptr %p.i) ]
 ; CHECK-NEXT:  ret void
 
   %p = alloca i32, i32 %n

diff  --git a/llvm/test/Transforms/Inline/devirtualize-2.ll b/llvm/test/Transforms/Inline/devirtualize-2.ll
index 617dde388bada..575e606c84f5b 100644
--- a/llvm/test/Transforms/Inline/devirtualize-2.ll
+++ b/llvm/test/Transforms/Inline/devirtualize-2.ll
@@ -2,13 +2,13 @@
 ; PR4834
 
 define i32 @test1() {
-  %funcall1_ = call fastcc i32 ()* () @f1()
+  %funcall1_ = call fastcc ptr () @f1()
   %executecommandptr1_ = call i32 %funcall1_()
   ret i32 %executecommandptr1_
 }
 
-define internal fastcc i32 ()* @f1() nounwind readnone {
-  ret i32 ()* @f2
+define internal fastcc ptr @f1() nounwind readnone {
+  ret ptr @f2
 }
 
 define internal i32 @f2() nounwind readnone {
@@ -22,21 +22,21 @@ define internal i32 @f2() nounwind readnone {
 
 
 
-declare i8* @f1a(i8*) ssp align 2
+declare ptr @f1a(ptr) ssp align 2
 
-define internal i32 @f2a(i8* %t) inlinehint ssp {
+define internal i32 @f2a(ptr %t) inlinehint ssp {
 entry:
   ret i32 41
 }
 
-define internal i32 @f3a(i32 (i8*)* %__f) ssp {
+define internal i32 @f3a(ptr %__f) ssp {
 entry:
-  %A = call i32 %__f(i8* undef)
+  %A = call i32 %__f(ptr undef)
   ret i32 %A
 }
 
-define i32 @test2(i8* %this) ssp align 2 {
-  %X = call i32 @f3a(i32 (i8*)* @f2a) ssp
+define i32 @test2(ptr %this) ssp align 2 {
+  %X = call i32 @f3a(ptr @f2a) ssp
   ret i32 %X
 }
 

diff  --git a/llvm/test/Transforms/Inline/devirtualize-3.ll b/llvm/test/Transforms/Inline/devirtualize-3.ll
index ea6d39af365b3..b4f80072391ad 100644
--- a/llvm/test/Transforms/Inline/devirtualize-3.ll
+++ b/llvm/test/Transforms/Inline/devirtualize-3.ll
@@ -8,70 +8,68 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-darwin10.0.0"
 
-%struct.cont_t = type { void (i8*, i32)*, i8* }
-%struct.foo_sf_t = type { %struct.cont_t*, i32 }
+%struct.cont_t = type { ptr, ptr }
+%struct.foo_sf_t = type { ptr, i32 }
 
 define i32 @main() nounwind ssp {
 entry:
-  %cont = alloca %struct.cont_t, align 8          ; <%struct.cont_t*> [#uses=4]
-  %tmp = getelementptr inbounds %struct.cont_t, %struct.cont_t* %cont, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
-  %tmp1 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %cont, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=2]
-  store void (i8*, i32)* bitcast (void (%struct.cont_t*, i32)* @quit to void (i8*, i32)*), void (i8*, i32)** %tmp1
-  %tmp2 = load void (i8*, i32)*, void (i8*, i32)** %tmp1            ; <void (i8*, i32)*> [#uses=1]
-  store void (i8*, i32)* %tmp2, void (i8*, i32)** %tmp
-  %tmp3 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %cont, i32 0, i32 1 ; <i8**> [#uses=1]
-  store i8* null, i8** %tmp3
-  call void @foo(%struct.cont_t* %cont)
+  %cont = alloca %struct.cont_t, align 8          ; <ptr> [#uses=4]
+  %tmp = getelementptr inbounds %struct.cont_t, ptr %cont, i32 0, i32 0 ; <ptr> [#uses=1]
+  %tmp1 = getelementptr inbounds %struct.cont_t, ptr %cont, i32 0, i32 0 ; <ptr> [#uses=2]
+  store ptr @quit, ptr %tmp1
+  %tmp2 = load ptr, ptr %tmp1            ; <ptr> [#uses=1]
+  store ptr %tmp2, ptr %tmp
+  %tmp3 = getelementptr inbounds %struct.cont_t, ptr %cont, i32 0, i32 1 ; <ptr> [#uses=1]
+  store ptr null, ptr %tmp3
+  call void @foo(ptr %cont)
   ret i32 0
 }
 
-define internal void @quit(%struct.cont_t* %cont, i32 %rcode) nounwind ssp {
+define internal void @quit(ptr %cont, i32 %rcode) nounwind ssp {
 entry:
   call void @exit(i32 %rcode) noreturn
   unreachable
 }
 
-define internal void @foo(%struct.cont_t* %c) nounwind ssp {
+define internal void @foo(ptr %c) nounwind ssp {
 entry:
-  %sf = alloca %struct.foo_sf_t, align 8          ; <%struct.foo_sf_t*> [#uses=3]
-  %next = alloca %struct.cont_t, align 8          ; <%struct.cont_t*> [#uses=3]
-  %tmp = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
-  store %struct.cont_t* %c, %struct.cont_t** %tmp
-  %tmp2 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 1 ; <i32*> [#uses=1]
-  store i32 2, i32* %tmp2
-  %tmp4 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %next, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
-  store void (i8*, i32)* bitcast (void (%struct.foo_sf_t*, i32)* @foo2 to void (i8*, i32)*), void (i8*, i32)** %tmp4
-  %tmp5 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %next, i32 0, i32 1 ; <i8**> [#uses=1]
-  %conv = bitcast %struct.foo_sf_t* %sf to i8*    ; <i8*> [#uses=1]
-  store i8* %conv, i8** %tmp5
-  call void @bar(%struct.cont_t* %next, i32 14)
+  %sf = alloca %struct.foo_sf_t, align 8          ; <ptr> [#uses=3]
+  %next = alloca %struct.cont_t, align 8          ; <ptr> [#uses=3]
+  %tmp = getelementptr inbounds %struct.foo_sf_t, ptr %sf, i32 0, i32 0 ; <ptr> [#uses=1]
+  store ptr %c, ptr %tmp
+  %tmp2 = getelementptr inbounds %struct.foo_sf_t, ptr %sf, i32 0, i32 1 ; <ptr> [#uses=1]
+  store i32 2, ptr %tmp2
+  %tmp4 = getelementptr inbounds %struct.cont_t, ptr %next, i32 0, i32 0 ; <ptr> [#uses=1]
+  store ptr @foo2, ptr %tmp4
+  %tmp5 = getelementptr inbounds %struct.cont_t, ptr %next, i32 0, i32 1 ; <ptr> [#uses=1]
+  store ptr %sf, ptr %tmp5
+  call void @bar(ptr %next, i32 14)
   ret void
 }
 
-define internal void @foo2(%struct.foo_sf_t* %sf, i32 %y) nounwind ssp {
+define internal void @foo2(ptr %sf, i32 %y) nounwind ssp {
 entry:
-  %tmp1 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
-  %tmp2 = load %struct.cont_t*, %struct.cont_t** %tmp1             ; <%struct.cont_t*> [#uses=1]
-  %tmp3 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %tmp2, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
-  %tmp4 = load void (i8*, i32)*, void (i8*, i32)** %tmp3            ; <void (i8*, i32)*> [#uses=1]
-  %tmp6 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
-  %tmp7 = load %struct.cont_t*, %struct.cont_t** %tmp6             ; <%struct.cont_t*> [#uses=1]
-  %conv = bitcast %struct.cont_t* %tmp7 to i8*    ; <i8*> [#uses=1]
-  %tmp9 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 1 ; <i32*> [#uses=1]
-  %tmp10 = load i32, i32* %tmp9                        ; <i32> [#uses=1]
+  %tmp1 = getelementptr inbounds %struct.foo_sf_t, ptr %sf, i32 0, i32 0 ; <ptr> [#uses=1]
+  %tmp2 = load ptr, ptr %tmp1             ; <ptr> [#uses=1]
+  %tmp3 = getelementptr inbounds %struct.cont_t, ptr %tmp2, i32 0, i32 0 ; <ptr> [#uses=1]
+  %tmp4 = load ptr, ptr %tmp3            ; <ptr> [#uses=1]
+  %tmp6 = getelementptr inbounds %struct.foo_sf_t, ptr %sf, i32 0, i32 0 ; <ptr> [#uses=1]
+  %tmp7 = load ptr, ptr %tmp6             ; <ptr> [#uses=1]
+  %tmp9 = getelementptr inbounds %struct.foo_sf_t, ptr %sf, i32 0, i32 1 ; <ptr> [#uses=1]
+  %tmp10 = load i32, ptr %tmp9                        ; <i32> [#uses=1]
   %mul = mul i32 %tmp10, %y                       ; <i32> [#uses=1]
-  call void %tmp4(i8* %conv, i32 %mul)
+  call void %tmp4(ptr %tmp7, i32 %mul)
   ret void
 }
 
-define internal void @bar(%struct.cont_t* %c, i32 %y) nounwind ssp {
+define internal void @bar(ptr %c, i32 %y) nounwind ssp {
 entry:
-  %tmp1 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %c, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
-  %tmp2 = load void (i8*, i32)*, void (i8*, i32)** %tmp1            ; <void (i8*, i32)*> [#uses=1]
-  %tmp4 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %c, i32 0, i32 1 ; <i8**> [#uses=1]
-  %tmp5 = load i8*, i8** %tmp4                         ; <i8*> [#uses=1]
+  %tmp1 = getelementptr inbounds %struct.cont_t, ptr %c, i32 0, i32 0 ; <ptr> [#uses=1]
+  %tmp2 = load ptr, ptr %tmp1            ; <ptr> [#uses=1]
+  %tmp4 = getelementptr inbounds %struct.cont_t, ptr %c, i32 0, i32 1 ; <ptr> [#uses=1]
+  %tmp5 = load ptr, ptr %tmp4                         ; <ptr> [#uses=1]
   %add = add nsw i32 %y, 5                        ; <i32> [#uses=1]
-  call void %tmp2(i8* %tmp5, i32 %add)
+  call void %tmp2(ptr %tmp5, i32 %add)
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/devirtualize-4.ll b/llvm/test/Transforms/Inline/devirtualize-4.ll
index 45867f034185a..fae364313ea02 100644
--- a/llvm/test/Transforms/Inline/devirtualize-4.ll
+++ b/llvm/test/Transforms/Inline/devirtualize-4.ll
@@ -34,76 +34,69 @@
 ;}
 
 %class.Impl = type <{ %class.Interface, i32, [4 x i8] }>
-%class.Interface = type { i32 (...)** }
+%class.Interface = type { ptr }
 
- at _ZTV4Impl = linkonce_odr dso_local unnamed_addr constant { [3 x i8*] } { [3 x i8*] [i8* null, i8* bitcast ({ i8*, i8*, i8* }* @_ZTI4Impl to i8*), i8* bitcast (void (%class.Impl*)* @_ZN4Impl3RunEv to i8*)] }, align 8
- at _ZTVN10__cxxabiv120__si_class_type_infoE = external dso_local global i8*
+ at _ZTV4Impl = linkonce_odr dso_local unnamed_addr constant { [3 x ptr] } { [3 x ptr] [ptr null, ptr @_ZTI4Impl, ptr @_ZN4Impl3RunEv] }, align 8
+ at _ZTVN10__cxxabiv120__si_class_type_infoE = external dso_local global ptr
 @_ZTS4Impl = linkonce_odr dso_local constant [6 x i8] c"4Impl\00", align 1
- at _ZTVN10__cxxabiv117__class_type_infoE = external dso_local global i8*
+ at _ZTVN10__cxxabiv117__class_type_infoE = external dso_local global ptr
 @_ZTS9Interface = linkonce_odr dso_local constant [11 x i8] c"9Interface\00", align 1
- at _ZTI9Interface = linkonce_odr dso_local constant { i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv117__class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([11 x i8], [11 x i8]* @_ZTS9Interface, i32 0, i32 0) }, align 8
- at _ZTI4Impl = linkonce_odr dso_local constant { i8*, i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @_ZTS4Impl, i32 0, i32 0), i8* bitcast ({ i8*, i8* }* @_ZTI9Interface to i8*) }, align 8
- at _ZTV9Interface = linkonce_odr dso_local unnamed_addr constant { [3 x i8*] } { [3 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTI9Interface to i8*), i8* bitcast (void ()* @__cxa_pure_virtual to i8*)] }, align 8
+ at _ZTI9Interface = linkonce_odr dso_local constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), ptr @_ZTS9Interface }, align 8
+ at _ZTI4Impl = linkonce_odr dso_local constant { ptr, ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2), ptr @_ZTS4Impl, ptr @_ZTI9Interface }, align 8
+ at _ZTV9Interface = linkonce_odr dso_local unnamed_addr constant { [3 x ptr] } { [3 x ptr] [ptr null, ptr @_ZTI9Interface, ptr @__cxa_pure_virtual] }, align 8
 
 define dso_local void @_Z4Testv() local_unnamed_addr {
 entry:
   %o = alloca %class.Impl, align 8
-  %0 = bitcast %class.Impl* %o to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %0)
-  call void @_ZN4ImplC2Ev(%class.Impl* nonnull %o)
-  %1 = getelementptr inbounds %class.Impl, %class.Impl* %o, i64 0, i32 0
-  call fastcc void @_ZL11IndirectRunR9Interface(%class.Interface* nonnull dereferenceable(8) %1)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %o)
+  call void @_ZN4ImplC2Ev(ptr nonnull %o)
+  call fastcc void @_ZL11IndirectRunR9Interface(ptr nonnull dereferenceable(8) %o)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %o)
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-define linkonce_odr dso_local void @_ZN4ImplC2Ev(%class.Impl* %this) unnamed_addr align 2 {
+define linkonce_odr dso_local void @_ZN4ImplC2Ev(ptr %this) unnamed_addr align 2 {
 entry:
-  %0 = getelementptr %class.Impl, %class.Impl* %this, i64 0, i32 0
-  call void @_ZN9InterfaceC2Ev(%class.Interface* %0)
-  %1 = getelementptr %class.Impl, %class.Impl* %this, i64 0, i32 0, i32 0
-  store i32 (...)** bitcast (i8** getelementptr inbounds ({ [3 x i8*] }, { [3 x i8*] }* @_ZTV4Impl, i64 0, inrange i32 0, i64 2) to i32 (...)**), i32 (...)*** %1, align 8
-  %f = getelementptr inbounds %class.Impl, %class.Impl* %this, i64 0, i32 1
-  store i32 3, i32* %f, align 8
+  call void @_ZN9InterfaceC2Ev(ptr %this)
+  store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV4Impl, i64 0, inrange i32 0, i64 2), ptr %this, align 8
+  %f = getelementptr inbounds %class.Impl, ptr %this, i64 0, i32 1
+  store i32 3, ptr %f, align 8
   ret void
 }
 
-define internal fastcc void @_ZL11IndirectRunR9Interface(%class.Interface* dereferenceable(8) %o) unnamed_addr {
+define internal fastcc void @_ZL11IndirectRunR9Interface(ptr dereferenceable(8) %o) unnamed_addr {
 entry:
-  %0 = bitcast %class.Interface* %o to void (%class.Interface*)***
-  %vtable = load void (%class.Interface*)**, void (%class.Interface*)*** %0, align 8
-  %1 = load void (%class.Interface*)*, void (%class.Interface*)** %vtable, align 8
-  call void %1(%class.Interface* nonnull %o)
+  %vtable = load ptr, ptr %o, align 8
+  %0 = load ptr, ptr %vtable, align 8
+  call void %0(ptr nonnull %o)
   ret void
 }
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
-define linkonce_odr dso_local void @_ZN9InterfaceC2Ev(%class.Interface* %this) unnamed_addr align 2 {
+define linkonce_odr dso_local void @_ZN9InterfaceC2Ev(ptr %this) unnamed_addr align 2 {
 entry:
-  %0 = getelementptr %class.Interface, %class.Interface* %this, i64 0, i32 0
-  store i32 (...)** bitcast (i8** getelementptr inbounds ({ [3 x i8*] }, { [3 x i8*] }* @_ZTV9Interface, i64 0, inrange i32 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8
+  store ptr getelementptr inbounds ({ [3 x ptr] }, ptr @_ZTV9Interface, i64 0, inrange i32 0, i64 2), ptr %this, align 8
   ret void
 }
 
-define linkonce_odr dso_local void @_ZN4Impl3RunEv(%class.Impl* %this) unnamed_addr align 2 {
+define linkonce_odr dso_local void @_ZN4Impl3RunEv(ptr %this) unnamed_addr align 2 {
 entry:
-  %ref.tmp = alloca %class.Impl*, align 8
-  %0 = bitcast %class.Impl** %ref.tmp to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
-  store %class.Impl* %this, %class.Impl** %ref.tmp, align 8
-  call void @_Z13DoNotOptimizeIP4ImplEvRKT_(%class.Impl** nonnull dereferenceable(8) %ref.tmp)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
+  %ref.tmp = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %ref.tmp)
+  store ptr %this, ptr %ref.tmp, align 8
+  call void @_Z13DoNotOptimizeIP4ImplEvRKT_(ptr nonnull dereferenceable(8) %ref.tmp)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %ref.tmp)
   ret void
 }
 
 declare dso_local void @__cxa_pure_virtual() unnamed_addr
 
-define linkonce_odr dso_local void @_Z13DoNotOptimizeIP4ImplEvRKT_(%class.Impl** dereferenceable(8) %var) local_unnamed_addr {
+define linkonce_odr dso_local void @_Z13DoNotOptimizeIP4ImplEvRKT_(ptr dereferenceable(8) %var) local_unnamed_addr {
 entry:
-  call void asm sideeffect "", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(%class.Impl** elementtype(%class.Impl*) nonnull %var, %class.Impl** elementtype(%class.Impl*) nonnull %var)
+  call void asm sideeffect "", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(ptr) nonnull %var, ptr elementtype(ptr) nonnull %var)
   ret void
 }
 
@@ -134,81 +127,74 @@ entry:
 ;  return f(&a, &A::vf2);
 ;}
 
-%struct.A = type { i32 (...)** }
+%struct.A = type { ptr }
 
- at _ZTV1A = linkonce_odr unnamed_addr constant { [4 x i8*] } { [4 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTI1A to i8*), i8* bitcast (i32 (%struct.A*)* @_ZN1A3vf1Ev to i8*), i8* bitcast (i32 (%struct.A*)* @_ZN1A3vf2Ev to i8*)] }, align 8
+ at _ZTV1A = linkonce_odr unnamed_addr constant { [4 x ptr] } { [4 x ptr] [ptr null, ptr @_ZTI1A, ptr @_ZN1A3vf1Ev, ptr @_ZN1A3vf2Ev] }, align 8
 @_ZTS1A = linkonce_odr constant [3 x i8] c"1A\00", align 1
- at _ZTI1A = linkonce_odr constant { i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv117__class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @_ZTS1A, i32 0, i32 0) }, align 8
+ at _ZTI1A = linkonce_odr constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), ptr @_ZTS1A }, align 8
 
-define i32 @_Z1fP1AMS_FivE(%struct.A* %a, i64 %fp.coerce0, i64 %fp.coerce1) {
+define i32 @_Z1fP1AMS_FivE(ptr %a, i64 %fp.coerce0, i64 %fp.coerce1) {
 entry:
-  %0 = bitcast %struct.A* %a to i8*
-  %1 = getelementptr inbounds i8, i8* %0, i64 %fp.coerce1
-  %this.adjusted = bitcast i8* %1 to %struct.A*
-  %2 = and i64 %fp.coerce0, 1
-  %memptr.isvirtual = icmp eq i64 %2, 0
+  %0 = getelementptr inbounds i8, ptr %a, i64 %fp.coerce1
+  %1 = and i64 %fp.coerce0, 1
+  %memptr.isvirtual = icmp eq i64 %1, 0
   br i1 %memptr.isvirtual, label %memptr.nonvirtual, label %memptr.virtual
 
 memptr.virtual:                                   ; preds = %entry
-  %3 = bitcast i8* %1 to i8**
-  %vtable = load i8*, i8** %3, align 8
-  %4 = add i64 %fp.coerce0, -1
-  %5 = getelementptr i8, i8* %vtable, i64 %4
-  %6 = bitcast i8* %5 to i32 (%struct.A*)**
-  %memptr.virtualfn = load i32 (%struct.A*)*, i32 (%struct.A*)** %6, align 8
+  %vtable = load ptr, ptr %0, align 8
+  %2 = add i64 %fp.coerce0, -1
+  %3 = getelementptr i8, ptr %vtable, i64 %2
+  %memptr.virtualfn = load ptr, ptr %3, align 8
   br label %memptr.end
 
 memptr.nonvirtual:                                ; preds = %entry
-  %memptr.nonvirtualfn = inttoptr i64 %fp.coerce0 to i32 (%struct.A*)*
+  %memptr.nonvirtualfn = inttoptr i64 %fp.coerce0 to ptr
   br label %memptr.end
 
 memptr.end:                                       ; preds = %memptr.nonvirtual, %memptr.virtual
-  %7 = phi i32 (%struct.A*)* [ %memptr.virtualfn, %memptr.virtual ], [ %memptr.nonvirtualfn, %memptr.nonvirtual ]
-  %call = call i32 %7(%struct.A* %this.adjusted)
+  %4 = phi ptr [ %memptr.virtualfn, %memptr.virtual ], [ %memptr.nonvirtualfn, %memptr.nonvirtual ]
+  %call = call i32 %4(ptr %0)
   ret i32 %call
 }
 
 define i32 @_Z2g1v() {
 entry:
   %a = alloca %struct.A, align 8
-  %0 = bitcast %struct.A* %a to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
-  call void @_ZN1AC1Ev(%struct.A* nonnull %a)
-  %call = call i32 @_Z1fP1AMS_FivE(%struct.A* nonnull %a, i64 1, i64 0)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %a)
+  call void @_ZN1AC1Ev(ptr nonnull %a)
+  %call = call i32 @_Z1fP1AMS_FivE(ptr nonnull %a, i64 1, i64 0)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %a)
   ret i32 %call
 }
 
-define linkonce_odr void @_ZN1AC1Ev(%struct.A* %this) align 2 {
+define linkonce_odr void @_ZN1AC1Ev(ptr %this) align 2 {
 entry:
-  call void @_ZN1AC2Ev(%struct.A* %this)
+  call void @_ZN1AC2Ev(ptr %this)
   ret void
 }
 
 define i32 @_Z2g2v() {
 entry:
   %a = alloca %struct.A, align 8
-  %0 = bitcast %struct.A* %a to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
-  call void @_ZN1AC1Ev(%struct.A* nonnull %a)
-  %call = call i32 @_Z1fP1AMS_FivE(%struct.A* nonnull %a, i64 9, i64 0)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %a)
+  call void @_ZN1AC1Ev(ptr nonnull %a)
+  %call = call i32 @_Z1fP1AMS_FivE(ptr nonnull %a, i64 9, i64 0)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %a)
   ret i32 %call
 }
 
-define linkonce_odr void @_ZN1AC2Ev(%struct.A* %this) align 2 {
+define linkonce_odr void @_ZN1AC2Ev(ptr %this) align 2 {
 entry:
-  %0 = getelementptr %struct.A, %struct.A* %this, i64 0, i32 0
-  store i32 (...)** bitcast (i8** getelementptr inbounds ({ [4 x i8*] }, { [4 x i8*] }* @_ZTV1A, i64 0, inrange i32 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8
+  store ptr getelementptr inbounds ({ [4 x ptr] }, ptr @_ZTV1A, i64 0, inrange i32 0, i64 2), ptr %this, align 8
   ret void
 }
 
-define linkonce_odr i32 @_ZN1A3vf1Ev(%struct.A* %this) align 2 {
+define linkonce_odr i32 @_ZN1A3vf1Ev(ptr %this) align 2 {
 entry:
   ret i32 1
 }
 
-define linkonce_odr i32 @_ZN1A3vf2Ev(%struct.A* %this) align 2 {
+define linkonce_odr i32 @_ZN1A3vf2Ev(ptr %this) align 2 {
 entry:
   ret i32 2
 }

diff  --git a/llvm/test/Transforms/Inline/devirtualize-5.ll b/llvm/test/Transforms/Inline/devirtualize-5.ll
index ab507d1d58a1d..dbfe445e898c7 100644
--- a/llvm/test/Transforms/Inline/devirtualize-5.ll
+++ b/llvm/test/Transforms/Inline/devirtualize-5.ll
@@ -9,14 +9,14 @@ define i32 @i() alwaysinline {
 ; CHECK-NEXT: ret i32 45
 
 define i32 @main() {
-  %a = alloca i32 ()*
-  store i32 ()* @i, i32 ()** %a
-  %r = call i32 @call(i32 ()** %a)
+  %a = alloca ptr
+  store ptr @i, ptr %a
+  %r = call i32 @call(ptr %a)
   ret i32 %r
 }
 
-define i32 @call(i32 ()** %a) alwaysinline {
-  %c = load i32 ()*, i32 ()** %a
+define i32 @call(ptr %a) alwaysinline {
+  %c = load ptr, ptr %a
   %r = call i32 %c()
   ret i32 %r
 }

diff  --git a/llvm/test/Transforms/Inline/devirtualize.ll b/llvm/test/Transforms/Inline/devirtualize.ll
index cdeae519d0f0c..4426d17533a4a 100644
--- a/llvm/test/Transforms/Inline/devirtualize.ll
+++ b/llvm/test/Transforms/Inline/devirtualize.ll
@@ -6,11 +6,11 @@ target triple = "x86_64-apple-darwin10.0.0"
 
 ; Simple devirt testcase, requires iteration between inliner and GVN.
 ;  rdar://6295824
-define i32 @foo(i32 ()** noalias %p, i64* noalias %q) nounwind ssp {
+define i32 @foo(ptr noalias %p, ptr noalias %q) nounwind ssp {
 entry:
-  store i32 ()* @bar, i32 ()** %p
-  store i64 0, i64* %q
-  %tmp3 = load i32 ()*, i32 ()** %p                        ; <i32 ()*> [#uses=1]
+  store ptr @bar, ptr %p
+  store i64 0, ptr %q
+  %tmp3 = load ptr, ptr %p                        ; <ptr> [#uses=1]
   %call = call i32 %tmp3()                        ; <i32> [#uses=1]
   %X = add i32 %call, 4
   ret i32 %X
@@ -33,151 +33,129 @@ entry:
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: ret i32 7
 
-%0 = type { i8*, i8* }
-%1 = type { i8*, i8*, i32, i32, i8*, i64, i8*, i64 }
-%2 = type { i8*, i8*, i8* }
-%struct.A = type { i8** }
-%struct.B = type { i8** }
+%0 = type { ptr, ptr }
+%1 = type { ptr, ptr, i32, i32, ptr, i64, ptr, i64 }
+%2 = type { ptr, ptr, ptr }
+%struct.A = type { ptr }
+%struct.B = type { ptr }
 %struct.C = type { [16 x i8] }
 %struct.D = type { [16 x i8] }
 
- at _ZTV1D = linkonce_odr constant [6 x i8*] [i8* null, i8* bitcast (%2* @_ZTI1D to i8*), i8* bitcast (i32 (%struct.C*)* @_ZN1D1fEv to i8*), i8* inttoptr (i64 -8 to i8*), i8* bitcast (%2* @_ZTI1D to i8*), i8* bitcast (i32 (%struct.C*)* @_ZThn8_N1D1fEv to i8*)] ; <[6 x i8*]*> [#uses=2]
- at _ZTVN10__cxxabiv120__si_class_type_infoE = external global i8* ; <i8**> [#uses=1]
- at _ZTS1D = linkonce_odr constant [3 x i8] c"1D\00"     ; <[3 x i8]*> [#uses=1]
- at _ZTVN10__cxxabiv121__vmi_class_type_infoE = external global i8* ; <i8**> [#uses=1]
- at _ZTS1C = linkonce_odr constant [3 x i8] c"1C\00"     ; <[3 x i8]*> [#uses=1]
- at _ZTVN10__cxxabiv117__class_type_infoE = external global i8* ; <i8**> [#uses=1]
- at _ZTS1A = linkonce_odr constant [3 x i8] c"1A\00"     ; <[3 x i8]*> [#uses=1]
- at _ZTI1A = linkonce_odr constant %0 { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv117__class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @_ZTS1A, i32 0, i32 0) } ; <%0*> [#uses=1]
- at _ZTS1B = linkonce_odr constant [3 x i8] c"1B\00"     ; <[3 x i8]*> [#uses=1]
- at _ZTI1B = linkonce_odr constant %0 { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv117__class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @_ZTS1B, i32 0, i32 0) } ; <%0*> [#uses=1]
- at _ZTI1C = linkonce_odr constant %1 { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @_ZTS1C, i32 0, i32 0), i32 0, i32 2, i8* bitcast (%0* @_ZTI1A to i8*), i64 2, i8* bitcast (%0* @_ZTI1B to i8*), i64 2050 } ; <%1*> [#uses=1]
- at _ZTI1D = linkonce_odr constant %2 { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @_ZTS1D, i32 0, i32 0), i8* bitcast (%1* @_ZTI1C to i8*) } ; <%2*> [#uses=1]
- at _ZTV1C = linkonce_odr constant [6 x i8*] [i8* null, i8* bitcast (%1* @_ZTI1C to i8*), i8* bitcast (i32 (%struct.C*)* @_ZN1C1fEv to i8*), i8* inttoptr (i64 -8 to i8*), i8* bitcast (%1* @_ZTI1C to i8*), i8* bitcast (i32 (%struct.C*)* @_ZThn8_N1C1fEv to i8*)] ; <[6 x i8*]*> [#uses=2]
- at _ZTV1B = linkonce_odr constant [3 x i8*] [i8* null, i8* bitcast (%0* @_ZTI1B to i8*), i8* bitcast (i32 (%struct.A*)* @_ZN1B1fEv to i8*)] ; <[3 x i8*]*> [#uses=1]
- at _ZTV1A = linkonce_odr constant [3 x i8*] [i8* null, i8* bitcast (%0* @_ZTI1A to i8*), i8* bitcast (i32 (%struct.A*)* @_ZN1A1fEv to i8*)] ; <[3 x i8*]*> [#uses=1]
+ at _ZTV1D = linkonce_odr constant [6 x ptr] [ptr null, ptr @_ZTI1D, ptr @_ZN1D1fEv, ptr inttoptr (i64 -8 to ptr), ptr @_ZTI1D, ptr @_ZThn8_N1D1fEv] ; <ptr> [#uses=2]
+ at _ZTVN10__cxxabiv120__si_class_type_infoE = external global ptr ; <ptr> [#uses=1]
+ at _ZTS1D = linkonce_odr constant [3 x i8] c"1D\00"     ; <ptr> [#uses=1]
+ at _ZTVN10__cxxabiv121__vmi_class_type_infoE = external global ptr ; <ptr> [#uses=1]
+ at _ZTS1C = linkonce_odr constant [3 x i8] c"1C\00"     ; <ptr> [#uses=1]
+ at _ZTVN10__cxxabiv117__class_type_infoE = external global ptr ; <ptr> [#uses=1]
+ at _ZTS1A = linkonce_odr constant [3 x i8] c"1A\00"     ; <ptr> [#uses=1]
+ at _ZTI1A = linkonce_odr constant %0 { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), ptr @_ZTS1A } ; <ptr> [#uses=1]
+ at _ZTS1B = linkonce_odr constant [3 x i8] c"1B\00"     ; <ptr> [#uses=1]
+ at _ZTI1B = linkonce_odr constant %0 { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i64 2), ptr @_ZTS1B } ; <ptr> [#uses=1]
+ at _ZTI1C = linkonce_odr constant %1 { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 2), ptr @_ZTS1C, i32 0, i32 2, ptr @_ZTI1A, i64 2, ptr @_ZTI1B, i64 2050 } ; <ptr> [#uses=1]
+ at _ZTI1D = linkonce_odr constant %2 { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2), ptr @_ZTS1D, ptr @_ZTI1C } ; <ptr> [#uses=1]
+ at _ZTV1C = linkonce_odr constant [6 x ptr] [ptr null, ptr @_ZTI1C, ptr @_ZN1C1fEv, ptr inttoptr (i64 -8 to ptr), ptr @_ZTI1C, ptr @_ZThn8_N1C1fEv] ; <ptr> [#uses=2]
+ at _ZTV1B = linkonce_odr constant [3 x ptr] [ptr null, ptr @_ZTI1B, ptr @_ZN1B1fEv] ; <ptr> [#uses=1]
+ at _ZTV1A = linkonce_odr constant [3 x ptr] [ptr null, ptr @_ZTI1A, ptr @_ZN1A1fEv] ; <ptr> [#uses=1]
 
 define i32 @_Z1gv() ssp {
 entry:
-  %d = alloca %struct.C, align 8                  ; <%struct.C*> [#uses=2]
-  call void @_ZN1DC1Ev(%struct.C* %d)
-  %call = call i32 @_Z1fP1D(%struct.C* %d)        ; <i32> [#uses=1]
+  %d = alloca %struct.C, align 8                  ; <ptr> [#uses=2]
+  call void @_ZN1DC1Ev(ptr %d)
+  %call = call i32 @_Z1fP1D(ptr %d)        ; <i32> [#uses=1]
   %X = add i32 %call, 3
   ret i32 %X
 }
 
-define linkonce_odr void @_ZN1DC1Ev(%struct.C* %this) inlinehint ssp align 2 {
+define linkonce_odr void @_ZN1DC1Ev(ptr %this) inlinehint ssp align 2 {
 entry:
-  call void @_ZN1DC2Ev(%struct.C* %this)
+  call void @_ZN1DC2Ev(ptr %this)
   ret void
 }
 
-define internal i32 @_Z1fP1D(%struct.C* %d) ssp {
+define internal i32 @_Z1fP1D(ptr %d) ssp {
 entry:
-  %0 = icmp eq %struct.C* %d, null                ; <i1> [#uses=1]
+  %0 = icmp eq ptr %d, null                ; <i1> [#uses=1]
   br i1 %0, label %cast.end, label %cast.notnull
 
 cast.notnull:                                     ; preds = %entry
-  %1 = bitcast %struct.C* %d to i8*               ; <i8*> [#uses=1]
-  %add.ptr = getelementptr i8, i8* %1, i64 8          ; <i8*> [#uses=1]
-  %2 = bitcast i8* %add.ptr to %struct.A*         ; <%struct.A*> [#uses=1]
+  %add.ptr = getelementptr i8, ptr %d, i64 8          ; <ptr> [#uses=1]
   br label %cast.end
 
 cast.end:                                         ; preds = %entry, %cast.notnull
-  %3 = phi %struct.A* [ %2, %cast.notnull ], [ null, %entry ] ; <%struct.A*> [#uses=2]
-  %4 = bitcast %struct.A* %3 to i32 (%struct.A*)*** ; <i32 (%struct.A*)***> [#uses=1]
-  %5 = load i32 (%struct.A*)**, i32 (%struct.A*)*** %4                ; <i32 (%struct.A*)**> [#uses=1]
-  %vfn = getelementptr inbounds i32 (%struct.A*)*, i32 (%struct.A*)** %5, i64 0 ; <i32 (%struct.A*)**> [#uses=1]
-  %6 = load i32 (%struct.A*)*, i32 (%struct.A*)** %vfn               ; <i32 (%struct.A*)*> [#uses=1]
-  %call = call i32 %6(%struct.A* %3)              ; <i32> [#uses=1]
+  %1 = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ] ; <ptr> [#uses=2]
+  %2 = load ptr, ptr %1                ; <ptr> [#uses=1]
+  %vfn = getelementptr inbounds ptr, ptr %2, i64 0 ; <ptr> [#uses=1]
+  %3 = load ptr, ptr %vfn               ; <ptr> [#uses=1]
+  %call = call i32 %3(ptr %1)              ; <i32> [#uses=1]
   ret i32 %call
 }
 
-define linkonce_odr i32 @_ZN1D1fEv(%struct.C* %this) ssp align 2 {
+define linkonce_odr i32 @_ZN1D1fEv(ptr %this) ssp align 2 {
 entry:
   ret i32 4
 }
 
-define linkonce_odr i32 @_ZThn8_N1D1fEv(%struct.C* %this) ssp {
+define linkonce_odr i32 @_ZThn8_N1D1fEv(ptr %this) ssp {
 entry:
-  %0 = bitcast %struct.C* %this to i8*            ; <i8*> [#uses=1]
-  %1 = getelementptr inbounds i8, i8* %0, i64 -8      ; <i8*> [#uses=1]
-  %2 = bitcast i8* %1 to %struct.C*               ; <%struct.C*> [#uses=1]
-  %call = call i32 @_ZN1D1fEv(%struct.C* %2)      ; <i32> [#uses=1]
+  %0 = getelementptr inbounds i8, ptr %this, i64 -8      ; <ptr> [#uses=1]
+  %call = call i32 @_ZN1D1fEv(ptr %0)      ; <i32> [#uses=1]
   ret i32 %call
 }
 
-define linkonce_odr void @_ZN1DC2Ev(%struct.C* %this) inlinehint ssp align 2 {
+define linkonce_odr void @_ZN1DC2Ev(ptr %this) inlinehint ssp align 2 {
 entry:
-  call void @_ZN1CC2Ev(%struct.C* %this)
-  %0 = bitcast %struct.C* %this to i8*            ; <i8*> [#uses=1]
-  %1 = getelementptr inbounds i8, i8* %0, i64 0       ; <i8*> [#uses=1]
-  %2 = bitcast i8* %1 to i8***                    ; <i8***> [#uses=1]
-  store i8** getelementptr inbounds ([6 x i8*], [6 x i8*]* @_ZTV1D, i64 0, i64 2), i8*** %2
-  %3 = bitcast %struct.C* %this to i8*            ; <i8*> [#uses=1]
-  %4 = getelementptr inbounds i8, i8* %3, i64 8       ; <i8*> [#uses=1]
-  %5 = bitcast i8* %4 to i8***                    ; <i8***> [#uses=1]
-  store i8** getelementptr inbounds ([6 x i8*], [6 x i8*]* @_ZTV1D, i64 0, i64 5), i8*** %5
+  call void @_ZN1CC2Ev(ptr %this)
+  %0 = getelementptr inbounds i8, ptr %this, i64 0       ; <ptr> [#uses=1]
+  store ptr getelementptr inbounds ([6 x ptr], ptr @_ZTV1D, i64 0, i64 2), ptr %0
+  %1 = getelementptr inbounds i8, ptr %this, i64 8       ; <ptr> [#uses=1]
+  store ptr getelementptr inbounds ([6 x ptr], ptr @_ZTV1D, i64 0, i64 5), ptr %1
   ret void
 }
 
-define linkonce_odr void @_ZN1CC2Ev(%struct.C* %this) inlinehint ssp align 2 {
+define linkonce_odr void @_ZN1CC2Ev(ptr %this) inlinehint ssp align 2 {
 entry:
-  %0 = bitcast %struct.C* %this to %struct.A*     ; <%struct.A*> [#uses=1]
-  call void @_ZN1AC2Ev(%struct.A* %0)
-  %1 = bitcast %struct.C* %this to i8*            ; <i8*> [#uses=1]
-  %2 = getelementptr inbounds i8, i8* %1, i64 8       ; <i8*> [#uses=1]
-  %3 = bitcast i8* %2 to %struct.A*               ; <%struct.A*> [#uses=1]
-  call void @_ZN1BC2Ev(%struct.A* %3)
-  %4 = bitcast %struct.C* %this to i8*            ; <i8*> [#uses=1]
-  %5 = getelementptr inbounds i8, i8* %4, i64 0       ; <i8*> [#uses=1]
-  %6 = bitcast i8* %5 to i8***                    ; <i8***> [#uses=1]
-  store i8** getelementptr inbounds ([6 x i8*], [6 x i8*]* @_ZTV1C, i64 0, i64 2), i8*** %6
-  %7 = bitcast %struct.C* %this to i8*            ; <i8*> [#uses=1]
-  %8 = getelementptr inbounds i8, i8* %7, i64 8       ; <i8*> [#uses=1]
-  %9 = bitcast i8* %8 to i8***                    ; <i8***> [#uses=1]
-  store i8** getelementptr inbounds ([6 x i8*], [6 x i8*]* @_ZTV1C, i64 0, i64 5), i8*** %9
+  call void @_ZN1AC2Ev(ptr %this)
+  %0 = getelementptr inbounds i8, ptr %this, i64 8       ; <ptr> [#uses=1]
+  call void @_ZN1BC2Ev(ptr %0)
+  %1 = getelementptr inbounds i8, ptr %this, i64 0       ; <ptr> [#uses=1]
+  store ptr getelementptr inbounds ([6 x ptr], ptr @_ZTV1C, i64 0, i64 2), ptr %1
+  %2 = getelementptr inbounds i8, ptr %this, i64 8       ; <ptr> [#uses=1]
+  store ptr getelementptr inbounds ([6 x ptr], ptr @_ZTV1C, i64 0, i64 5), ptr %2
   ret void
 }
 
-define linkonce_odr i32 @_ZN1C1fEv(%struct.C* %this) ssp align 2 {
+define linkonce_odr i32 @_ZN1C1fEv(ptr %this) ssp align 2 {
 entry:
   ret i32 3
 }
 
-define linkonce_odr i32 @_ZThn8_N1C1fEv(%struct.C* %this) {
+define linkonce_odr i32 @_ZThn8_N1C1fEv(ptr %this) {
 entry:
-  %0 = bitcast %struct.C* %this to i8*            ; <i8*> [#uses=1]
-  %1 = getelementptr inbounds i8, i8* %0, i64 -8      ; <i8*> [#uses=1]
-  %2 = bitcast i8* %1 to %struct.C*               ; <%struct.C*> [#uses=1]
-  %call = call i32 @_ZN1C1fEv(%struct.C* %2)      ; <i32> [#uses=1]
+  %0 = getelementptr inbounds i8, ptr %this, i64 -8      ; <ptr> [#uses=1]
+  %call = call i32 @_ZN1C1fEv(ptr %0)      ; <i32> [#uses=1]
   ret i32 %call
 }
 
-define linkonce_odr void @_ZN1AC2Ev(%struct.A* %this) inlinehint ssp align 2 {
+define linkonce_odr void @_ZN1AC2Ev(ptr %this) inlinehint ssp align 2 {
 entry:
-  %0 = bitcast %struct.A* %this to i8*            ; <i8*> [#uses=1]
-  %1 = getelementptr inbounds i8, i8* %0, i64 0       ; <i8*> [#uses=1]
-  %2 = bitcast i8* %1 to i8***                    ; <i8***> [#uses=1]
-  store i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @_ZTV1A, i64 0, i64 2), i8*** %2
+  %0 = getelementptr inbounds i8, ptr %this, i64 0       ; <ptr> [#uses=1]
+  store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), ptr %0
   ret void
 }
 
-define linkonce_odr void @_ZN1BC2Ev(%struct.A* %this) inlinehint ssp align 2 {
+define linkonce_odr void @_ZN1BC2Ev(ptr %this) inlinehint ssp align 2 {
 entry:
-  %0 = bitcast %struct.A* %this to i8*            ; <i8*> [#uses=1]
-  %1 = getelementptr inbounds i8, i8* %0, i64 0       ; <i8*> [#uses=1]
-  %2 = bitcast i8* %1 to i8***                    ; <i8***> [#uses=1]
-  store i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @_ZTV1B, i64 0, i64 2), i8*** %2
+  %0 = getelementptr inbounds i8, ptr %this, i64 0       ; <ptr> [#uses=1]
+  store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1B, i64 0, i64 2), ptr %0
   ret void
 }
 
-define linkonce_odr i32 @_ZN1B1fEv(%struct.A* %this) ssp align 2 {
+define linkonce_odr i32 @_ZN1B1fEv(ptr %this) ssp align 2 {
 entry:
   ret i32 2
 }
 
-define linkonce_odr i32 @_ZN1A1fEv(%struct.A* %this) ssp align 2 {
+define linkonce_odr i32 @_ZN1A1fEv(ptr %this) ssp align 2 {
 entry:
   ret i32 1
 }

diff  --git a/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll b/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll
index 5579909bef67f..fabbfa4ec2d1e 100644
--- a/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll
+++ b/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll
@@ -3,13 +3,13 @@
 target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.15.0"
 
-define void @caller1(i8 *%p1, i1 %b) {
+define void @caller1(ptr %p1, i1 %b) {
 ; CHECK-LABEL: @caller1(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[COND:%.*]] = icmp eq i1 [[B:%.*]], true
 ; CHECK-NEXT:    br i1 [[COND]], label [[EXIT:%.*]], label [[SPLIT:%.*]]
 ; CHECK:       split:
-; CHECK-NEXT:    call void @callee(i8* [[P1:%.*]], i32 0, i32 -1)
+; CHECK-NEXT:    call void @callee(ptr [[P1:%.*]], i32 0, i32 -1)
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret void
@@ -20,36 +20,34 @@ entry:
 
 split:
   ; This path may be generated from CS splitting and never taken at runtime.
-  call void @callee(i8* %p1, i32 0, i32 -1)
+  call void @callee(ptr %p1, i32 0, i32 -1)
   br label %exit
 
 exit:
   ret void
 }
 
-define  void @callee(i8* %p1, i32 %l1, i32 %l2) {
+define  void @callee(ptr %p1, i32 %l1, i32 %l2) {
 entry:
   %ext = zext i32 %l2 to i64
   %vla = alloca float, i64 %ext, align 16
-  call void @extern_call(float* nonnull %vla) #3
+  call void @extern_call(ptr nonnull %vla) #3
   ret void
 }
 
 
-define void @caller2_below_threshold(i8 *%p1, i1 %b) {
+define void @caller2_below_threshold(ptr %p1, i1 %b) {
 ; CHECK-LABEL: @caller2_below_threshold(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[VLA_I:%.*]] = alloca float, i64 15000, align 16
 ; CHECK-NEXT:    [[COND:%.*]] = icmp eq i1 [[B:%.*]], true
 ; CHECK-NEXT:    br i1 [[COND]], label [[EXIT:%.*]], label [[SPLIT:%.*]]
 ; CHECK:       split:
-; CHECK-NEXT:    [[SAVEDSTACK:%.*]] = call i8* @llvm.stacksave()
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[VLA_I]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 60000, i8* [[TMP0]])
-; CHECK-NEXT:    call void @extern_call(float* nonnull [[VLA_I]]) #3
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[VLA_I]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 60000, i8* [[TMP1]])
-; CHECK-NEXT:    call void @llvm.stackrestore(i8* [[SAVEDSTACK]])
+; CHECK-NEXT:    [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave()
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 60000, ptr [[VLA_I]])
+; CHECK-NEXT:    call void @extern_call(ptr nonnull [[VLA_I]]) #3
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 60000, ptr [[VLA_I]])
+; CHECK-NEXT:    call void @llvm.stackrestore(ptr [[SAVEDSTACK]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret void
@@ -59,27 +57,27 @@ entry:
   br i1 %cond, label %exit, label %split
 
 split:
-  call void @callee(i8* %p1, i32 0, i32 15000)
+  call void @callee(ptr %p1, i32 0, i32 15000)
   br label %exit
 
 exit:
   ret void
 }
 
-define  void @callee2_not_in_entry(i8* %p1, i32 %l1, i32 %l2) {
+define  void @callee2_not_in_entry(ptr %p1, i32 %l1, i32 %l2) {
 entry:
   %ext = zext i32 %l2 to i64
   %c = icmp eq i32 %l1, 42
   br i1 %c, label %bb2, label %bb3
 bb2:
   %vla = alloca float, i64 %ext, align 16
-  call void @extern_call(float* nonnull %vla) #3
+  call void @extern_call(ptr nonnull %vla) #3
   ret void
 bb3:
   ret void
 }
 
-define void @caller3_alloca_not_in_entry(i8 *%p1, i1 %b) {
+define void @caller3_alloca_not_in_entry(ptr %p1, i1 %b) {
 ; CHECK-LABEL: @caller3_alloca_not_in_entry(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[COND:%.*]] = icmp eq i1 [[B:%.*]], true
@@ -94,20 +92,20 @@ entry:
   br i1 %cond, label %exit, label %split
 
 split:
-  call void @callee2_not_in_entry(i8* %p1, i32 0, i32 -1)
+  call void @callee2_not_in_entry(ptr %p1, i32 0, i32 -1)
   br label %exit
 
 exit:
   ret void
 }
 
-define void @caller4_over_threshold(i8 *%p1, i1 %b, i32 %len) {
+define void @caller4_over_threshold(ptr %p1, i1 %b, i32 %len) {
 ; CHECK-LABEL: @caller4_over_threshold(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[COND:%.*]] = icmp eq i1 [[B:%.*]], true
 ; CHECK-NEXT:    br i1 [[COND]], label [[EXIT:%.*]], label [[SPLIT:%.*]]
 ; CHECK:       split:
-; CHECK-NEXT:    call void @callee(i8* [[P1:%.*]], i32 0, i32 16500)
+; CHECK-NEXT:    call void @callee(ptr [[P1:%.*]], i32 0, i32 16500)
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret void
@@ -117,15 +115,15 @@ entry:
   br i1 %cond, label %exit, label %split
 
 split:
-  call void @callee(i8* %p1, i32 0, i32 16500)
+  call void @callee(ptr %p1, i32 0, i32 16500)
   br label %exit
 
 exit:
   ret void
 }
 
-declare noalias i8* @malloc(i64)
-define i8* @stack_allocate(i32 %size) #2 {
+declare noalias ptr @malloc(i64)
+define ptr @stack_allocate(i32 %size) #2 {
 entry:
   %cmp = icmp ult i32 %size, 100
   %conv = zext i32 %size to i64
@@ -136,18 +134,18 @@ if.then:                                          ; preds = %entry
   br label %return
 
 if.end:                                           ; preds = %entry
-  %call = tail call i8* @malloc(i64 %conv) #3
+  %call = tail call ptr @malloc(i64 %conv) #3
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %retval.0 = phi i8* [ %0, %if.then ], [ %call, %if.end ]
-  ret i8* %retval.0
+  %retval.0 = phi ptr [ %0, %if.then ], [ %call, %if.end ]
+  ret ptr %retval.0
 }
 
-define i8* @test_stack_allocate_always(i32 %size) {
+define ptr @test_stack_allocate_always(i32 %size) {
 ; CHECK-LABEL: @test_stack_allocate_always(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[SAVEDSTACK:%.*]] = call i8* @llvm.stacksave()
+; CHECK-NEXT:    [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave()
 ; CHECK-NEXT:    [[CMP_I:%.*]] = icmp ult i32 [[SIZE:%.*]], 100
 ; CHECK-NEXT:    [[CONV_I:%.*]] = zext i32 [[SIZE]] to i64
 ; CHECK-NEXT:    br i1 [[CMP_I]], label [[IF_THEN_I:%.*]], label [[IF_END_I:%.*]]
@@ -155,19 +153,19 @@ define i8* @test_stack_allocate_always(i32 %size) {
 ; CHECK-NEXT:    [[TMP0:%.*]] = alloca i8, i64 [[CONV_I]], align 8
 ; CHECK-NEXT:    br label [[STACK_ALLOCATE_EXIT:%.*]]
 ; CHECK:       if.end.i:
-; CHECK-NEXT:    [[CALL_I:%.*]] = tail call i8* @malloc(i64 [[CONV_I]]) #3
+; CHECK-NEXT:    [[CALL_I:%.*]] = tail call ptr @malloc(i64 [[CONV_I]]) #3
 ; CHECK-NEXT:    br label [[STACK_ALLOCATE_EXIT]]
 ; CHECK:       stack_allocate.exit:
-; CHECK-NEXT:    [[RETVAL_0_I:%.*]] = phi i8* [ [[TMP0]], [[IF_THEN_I]] ], [ [[CALL_I]], [[IF_END_I]] ]
-; CHECK-NEXT:    call void @llvm.stackrestore(i8* [[SAVEDSTACK]])
-; CHECK-NEXT:    ret i8* [[RETVAL_0_I]]
+; CHECK-NEXT:    [[RETVAL_0_I:%.*]] = phi ptr [ [[TMP0]], [[IF_THEN_I]] ], [ [[CALL_I]], [[IF_END_I]] ]
+; CHECK-NEXT:    call void @llvm.stackrestore(ptr [[SAVEDSTACK]])
+; CHECK-NEXT:    ret ptr [[RETVAL_0_I]]
 ;
 entry:
-  %call = tail call i8* @stack_allocate(i32 %size)
-  ret i8* %call
+  %call = tail call ptr @stack_allocate(i32 %size)
+  ret ptr %call
 }
 
-declare void @extern_call(float*)
+declare void @extern_call(ptr)
 
 attributes #1 = { argmemonly nounwind willreturn writeonly }
 attributes #2 = { alwaysinline }

diff  --git a/llvm/test/Transforms/Inline/dynamic_alloca_test.ll b/llvm/test/Transforms/Inline/dynamic_alloca_test.ll
index 2716d433d8724..61f759c0890c7 100644
--- a/llvm/test/Transforms/Inline/dynamic_alloca_test.ll
+++ b/llvm/test/Transforms/Inline/dynamic_alloca_test.ll
@@ -10,18 +10,18 @@
 ; once that functionality is restored.
 ; XFAIL: *
 
-declare void @ext(i32*)
+declare void @ext(ptr)
 
 define internal void @callee(i32 %N) {
   %P = alloca i32, i32 %N
-  call void @ext(i32* %P)
+  call void @ext(ptr %P)
   ret void
 }
 
 define void @foo(i32 %N) {
 ; CHECK-LABEL: @foo(
 ; CHECK: alloca i32, i32 %{{.*}}
-; CHECK: call i8* @llvm.stacksave()
+; CHECK: call ptr @llvm.stacksave()
 ; CHECK: alloca i32, i32 %{{.*}}
 ; CHECK: call void @ext
 ; CHECK: call void @llvm.stackrestore
@@ -29,7 +29,7 @@ define void @foo(i32 %N) {
 
 entry:
   %P = alloca i32, i32 %N
-  call void @ext(i32* %P)
+  call void @ext(ptr %P)
   br label %loop
 
 loop:

diff  --git a/llvm/test/Transforms/Inline/ephemeral.ll b/llvm/test/Transforms/Inline/ephemeral.ll
index 11edfbb7528cc..0bbdd2e2e2eb7 100644
--- a/llvm/test/Transforms/Inline/ephemeral.ll
+++ b/llvm/test/Transforms/Inline/ephemeral.ll
@@ -7,8 +7,8 @@
 ; the instructions feeding the assume.
 ; CHECK: Analyzing call of inner...
 ; CHECK: NumInstructions: 2
-define i32 @inner(i8* %y) {
-  %a1 = load volatile i32, i32* @a
+define i32 @inner(ptr %y) {
+  %a1 = load volatile i32, ptr @a
 
   ; Because these instructions are used only by the @llvm.assume intrinsic,
   ; they're free and should not be included in the instruction count when
@@ -19,7 +19,7 @@ define i32 @inner(i8* %y) {
   %a5 = mul i32 %a4, %a4
   %a6 = add i32 %a5, %a5
   %ca = icmp sgt i32 %a6, -7
-  %r = call i1 @llvm.type.test(i8* %y, metadata !0)
+  %r = call i1 @llvm.type.test(ptr %y, metadata !0)
   %ca2 = icmp eq i1 %ca, %r
   tail call void @llvm.assume(i1 %ca2)
 
@@ -30,20 +30,20 @@ define i32 @inner(i8* %y) {
 ; are both ephemeral.
 ; CHECK: Analyzing call of inner2...
 ; CHECK: NumInstructions: 1
-define void @inner2(i8* %y) {
-  %v = load i8, i8* %y
+define void @inner2(ptr %y) {
+  %v = load i8, ptr %y
   %c = icmp eq i8 %v, 42
   call void @llvm.assume(i1 %c)
   ret void
 }
 
-define i32 @outer(i8* %y) optsize {
-   %r = call i32 @inner(i8* %y)
-   call void @inner2(i8* %y)
+define i32 @outer(ptr %y) optsize {
+   %r = call i32 @inner(ptr %y)
+   call void @inner2(ptr %y)
    ret i32 %r
 }
 
 declare void @llvm.assume(i1) nounwind
-declare i1 @llvm.type.test(i8*, metadata) nounwind readnone
+declare i1 @llvm.type.test(ptr, metadata) nounwind readnone
 
 !0 = !{i32 0, !"typeid1"}

diff  --git a/llvm/test/Transforms/Inline/frameescape.ll b/llvm/test/Transforms/Inline/frameescape.ll
index 7d0f370e574ba..8566a7524c38d 100644
--- a/llvm/test/Transforms/Inline/frameescape.ll
+++ b/llvm/test/Transforms/Inline/frameescape.ll
@@ -5,23 +5,22 @@
 ; PR23216: We can't inline functions using llvm.localescape.
 
 declare void @llvm.localescape(...)
-declare i8* @llvm.frameaddress(i32)
-declare i8* @llvm.localrecover(i8*, i8*, i32)
+declare ptr @llvm.frameaddress(i32)
+declare ptr @llvm.localrecover(ptr, ptr, i32)
 
-define internal void @foo(i8* %fp) {
-  %a.i8 = call i8* @llvm.localrecover(i8* bitcast (i32 ()* @bar to i8*), i8* %fp, i32 0)
-  %a = bitcast i8* %a.i8 to i32*
-  store i32 42, i32* %a
+define internal void @foo(ptr %fp) {
+  %a.i8 = call ptr @llvm.localrecover(ptr @bar, ptr %fp, i32 0)
+  store i32 42, ptr %a.i8
   ret void
 }
 
 define internal i32 @bar() {
 entry:
   %a = alloca i32
-  call void (...) @llvm.localescape(i32* %a)
-  %fp = call i8* @llvm.frameaddress(i32 0)
-  tail call void @foo(i8* %fp)
-  %r = load i32, i32* %a
+  call void (...) @llvm.localescape(ptr %a)
+  %fp = call ptr @llvm.frameaddress(i32 0)
+  tail call void @foo(ptr %fp)
+  %r = load i32, ptr %a
   ret i32 %r
 }
 
@@ -29,8 +28,8 @@ entry:
 define internal i32 @bar_alwaysinline() alwaysinline {
 entry:
   %a = alloca i32
-  call void (...) @llvm.localescape(i32* %a)
-  tail call void @foo(i8* null)
+  call void (...) @llvm.localescape(ptr %a)
+  tail call void @foo(ptr null)
   ret i32 0
 }
 

diff  --git a/llvm/test/Transforms/Inline/function-count-update-3.ll b/llvm/test/Transforms/Inline/function-count-update-3.ll
index 215d64175faf7..3e1106710098a 100644
--- a/llvm/test/Transforms/Inline/function-count-update-3.ll
+++ b/llvm/test/Transforms/Inline/function-count-update-3.ll
@@ -52,7 +52,7 @@ define i32 @e(i32 %c1) !prof !4 {
 
 cond_false:
   call void @ext()
-  %c2 = load i32, i32* @data, align 4
+  %c2 = load i32, ptr @data, align 4
   %c3 = add i32 %c1, %c2
   %c4 = mul i32 %c3, %c2
   %c5 = add i32 %c4, %c2

diff  --git a/llvm/test/Transforms/Inline/gep_from_constant.ll b/llvm/test/Transforms/Inline/gep_from_constant.ll
index 3fe23818b9dd1..e1877b0587f00 100644
--- a/llvm/test/Transforms/Inline/gep_from_constant.ll
+++ b/llvm/test/Transforms/Inline/gep_from_constant.ll
@@ -1,15 +1,15 @@
 ; RUN: opt < %s -passes="print<inline-cost>" 2>&1 | FileCheck %s
 
 ; CHECK-LABEL: @foo
-; CHECK: cost before = {{.*}}, cost after = {{.*}}, threshold before = {{.*}}, threshold after = {{.*}}, cost delta = {{.*}}, simplified to i8 addrspace(1)** inttoptr (i64 754974760 to i8 addrspace(1)**)
+; CHECK: cost before = {{.*}}, cost after = {{.*}}, threshold before = {{.*}}, threshold after = {{.*}}, cost delta = {{.*}}, simplified to ptr inttoptr (i64 754974760 to ptr)
 
-define i8 addrspace(1)** @foo(i64 %0) {
-  %2 = inttoptr i64 754974720 to i8 addrspace(1)**
-  %3 = getelementptr i8 addrspace(1)*, i8 addrspace(1)** %2, i64 %0
-  ret i8 addrspace(1)** %3
+define ptr @foo(i64 %0) {
+  %2 = inttoptr i64 754974720 to ptr
+  %3 = getelementptr ptr addrspace(1), ptr %2, i64 %0
+  ret ptr %3
 }
 
-define i8 addrspace(1)** @main() {
-  %1 = call i8 addrspace(1)** @foo(i64 5)
-  ret i8 addrspace(1)** %1
+define ptr @main() {
+  %1 = call ptr @foo(i64 5)
+  ret ptr %1
 }

diff  --git a/llvm/test/Transforms/Inline/gvn-inline-iteration.ll b/llvm/test/Transforms/Inline/gvn-inline-iteration.ll
index c808532f0bc4a..053b7cb2b51be 100644
--- a/llvm/test/Transforms/Inline/gvn-inline-iteration.ll
+++ b/llvm/test/Transforms/Inline/gvn-inline-iteration.ll
@@ -4,11 +4,11 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-darwin10.0.0"
 
-define i32 @foo(i32 ()** noalias nocapture %p, i64* noalias nocapture %q) nounwind ssp {
+define i32 @foo(ptr noalias nocapture %p, ptr noalias nocapture %q) nounwind ssp {
 entry:
-  store i32 ()* @bar, i32 ()** %p
-  store i64 0, i64* %q
-  %tmp3 = load i32 ()*, i32 ()** %p                        ; <i32 ()*> [#uses=1]
+  store ptr @bar, ptr %p
+  store i64 0, ptr %q
+  %tmp3 = load ptr, ptr %p                        ; <ptr> [#uses=1]
   %call = tail call i32 %tmp3() nounwind          ; <i32> [#uses=1]
   ret i32 %call
 }

diff  --git a/llvm/test/Transforms/Inline/inalloca-not-static.ll b/llvm/test/Transforms/Inline/inalloca-not-static.ll
index a5d7f55065ff6..44fd698de8649 100644
--- a/llvm/test/Transforms/Inline/inalloca-not-static.ll
+++ b/llvm/test/Transforms/Inline/inalloca-not-static.ll
@@ -23,11 +23,11 @@ target triple = "i386-pc-windows-msvc19.0.24210"
 
 %struct.Foo = type { i32 }
 
-declare i8* @llvm.stacksave()
-declare void @llvm.stackrestore(i8*)
+declare ptr @llvm.stacksave()
+declare void @llvm.stackrestore(ptr)
 
-declare x86_thiscallcc %struct.Foo* @"\01??0Foo@@QAE at XZ"(%struct.Foo* returned) unnamed_addr
-declare x86_thiscallcc void @"\01??1Foo@@QAE at XZ"(%struct.Foo*) unnamed_addr
+declare x86_thiscallcc ptr @"\01??0Foo@@QAE at XZ"(ptr returned) unnamed_addr
+declare x86_thiscallcc void @"\01??1Foo@@QAE at XZ"(ptr) unnamed_addr
 
 define void @f() {
 entry:
@@ -37,29 +37,25 @@ entry:
 
 define internal void @g() alwaysinline {
 entry:
-  %inalloca.save = call i8* @llvm.stacksave()
+  %inalloca.save = call ptr @llvm.stacksave()
   %argmem = alloca inalloca <{ %struct.Foo }>, align 4
-  %0 = getelementptr inbounds <{ %struct.Foo }>, <{ %struct.Foo }>* %argmem, i32 0, i32 0
-  %call = call x86_thiscallcc %struct.Foo* @"\01??0Foo@@QAE at XZ"(%struct.Foo* %0)
-  call void @h(<{ %struct.Foo }>* inalloca(<{ %struct.Foo }>) %argmem)
-  call void @llvm.stackrestore(i8* %inalloca.save)
+  %call = call x86_thiscallcc ptr @"\01??0Foo@@QAE at XZ"(ptr %argmem)
+  call void @h(ptr inalloca(<{ %struct.Foo }>) %argmem)
+  call void @llvm.stackrestore(ptr %inalloca.save)
   ret void
 }
 
 ; Function Attrs: alwaysinline inlinehint nounwind
-define internal void @h(<{ %struct.Foo }>* inalloca(<{ %struct.Foo }>)) alwaysinline {
+define internal void @h(ptr inalloca(<{ %struct.Foo }>)) alwaysinline {
 entry:
-  %o = getelementptr inbounds <{ %struct.Foo }>, <{ %struct.Foo }>* %0, i32 0, i32 0
-  call x86_thiscallcc void @"\01??1Foo@@QAE at XZ"(%struct.Foo* %o)
+  call x86_thiscallcc void @"\01??1Foo@@QAE at XZ"(ptr %0)
   ret void
 }
 
 ; CHECK: define void @f()
-; CHECK:   %[[STACKSAVE:.*]] = call i8* @llvm.stacksave()
+; CHECK:   %[[STACKSAVE:.*]] = call ptr @llvm.stacksave()
 ; CHECK:   %[[ARGMEM:.*]] = alloca inalloca <{ %struct.Foo }>, align 4
-; CHECK:   %[[GEP1:.*]] = getelementptr inbounds <{ %struct.Foo }>, <{ %struct.Foo }>* %[[ARGMEM]], i32 0, i32 0
-; CHECK:   %[[CALL:.*]] = call x86_thiscallcc %struct.Foo* @"\01??0Foo@@QAE at XZ"(%struct.Foo* %[[GEP1]])
-; CHECK:   %[[GEP2:.*]] = getelementptr inbounds <{ %struct.Foo }>, <{ %struct.Foo }>* %[[ARGMEM]], i32 0, i32 0
-; CHECK:   call x86_thiscallcc void @"\01??1Foo@@QAE at XZ"(%struct.Foo* %[[GEP2]])
-; CHECK:   call void @llvm.stackrestore(i8* %[[STACKSAVE]])
+; CHECK:   %[[CALL:.*]] = call x86_thiscallcc ptr @"\01??0Foo@@QAE at XZ"(ptr %[[ARGMEM]])
+; CHECK:   call x86_thiscallcc void @"\01??1Foo@@QAE at XZ"(ptr %[[ARGMEM]])
+; CHECK:   call void @llvm.stackrestore(ptr %[[STACKSAVE]])
 ; CHECK:   ret void

diff  --git a/llvm/test/Transforms/Inline/inline-assume.ll b/llvm/test/Transforms/Inline/inline-assume.ll
index 42952bcb655f9..287a5625ea77d 100644
--- a/llvm/test/Transforms/Inline/inline-assume.ll
+++ b/llvm/test/Transforms/Inline/inline-assume.ll
@@ -3,12 +3,12 @@
 ; RUN: opt -passes='module-inline' -S < %s | FileCheck %s
 
 %0 = type opaque
-%struct.Foo = type { i32, %0* }
+%struct.Foo = type { i32, ptr }
 
 ; Test that we don't crash when inlining @bar (rdar://22521387).
-define void @foo(%struct.Foo* align 4 %a) {
+define void @foo(ptr align 4 %a) {
 entry:
-  call fastcc void @bar(%struct.Foo* nonnull align 4 undef)
+  call fastcc void @bar(ptr nonnull align 4 undef)
 
 ; CHECK: call void @llvm.assume(i1 undef)
 ; CHECK: unreachable
@@ -16,10 +16,10 @@ entry:
   ret void
 }
 
-define fastcc void @bar(%struct.Foo* align 4 %a) {
+define fastcc void @bar(ptr align 4 %a) {
 ; CHECK-LABEL: @bar
 entry:
-  %b = getelementptr inbounds %struct.Foo, %struct.Foo* %a, i32 0, i32 1
+  %b = getelementptr inbounds %struct.Foo, ptr %a, i32 0, i32 1
   br i1 undef, label %if.end, label %if.then.i.i
 
 if.then.i.i:

diff  --git a/llvm/test/Transforms/Inline/inline-constexpr-addrspacecast-argument.ll b/llvm/test/Transforms/Inline/inline-constexpr-addrspacecast-argument.ll
index cbf4af72fbf55..767a584143026 100644
--- a/llvm/test/Transforms/Inline/inline-constexpr-addrspacecast-argument.ll
+++ b/llvm/test/Transforms/Inline/inline-constexpr-addrspacecast-argument.ll
@@ -7,21 +7,21 @@ target datalayout = "e-p3:32:32-p4:64:64-n32"
 @lds = internal addrspace(3) global [64 x i64] zeroinitializer
 
 ; CHECK-LABEL: @constexpr_addrspacecast_ptr_size_change(
-; CHECK: load i64, i64 addrspace(4)* addrspacecast (i64 addrspace(3)* getelementptr inbounds ([64 x i64], [64 x i64] addrspace(3)* @lds, i32 0, i32 0) to i64 addrspace(4)*)
+; CHECK: load i64, ptr addrspace(4) addrspacecast (ptr addrspace(3) @lds to ptr addrspace(4))
 ; CHECK-NEXT: br
 define void @constexpr_addrspacecast_ptr_size_change() #0 {
-  %tmp0 = call i32 @foo(i64 addrspace(4)* addrspacecast (i64 addrspace(3)* getelementptr inbounds ([64 x i64], [64 x i64] addrspace(3)* @lds, i32 0, i32 0) to i64 addrspace(4)*)) #1
+  %tmp0 = call i32 @foo(ptr addrspace(4) addrspacecast (ptr addrspace(3) @lds to ptr addrspace(4))) #1
   ret void
 }
 
-define i32 @foo(i64 addrspace(4)* %arg) #1 {
+define i32 @foo(ptr addrspace(4) %arg) #1 {
 bb:
-  %tmp = getelementptr i64, i64 addrspace(4)* %arg, i64 undef
-  %tmp1 = load i64, i64 addrspace(4)* %tmp
+  %tmp = getelementptr i64, ptr addrspace(4) %arg, i64 undef
+  %tmp1 = load i64, ptr addrspace(4) %tmp
   br i1 undef, label %bb2, label %bb3
 
 bb2:
-  store i64 0, i64 addrspace(4)* %tmp
+  store i64 0, ptr addrspace(4) %tmp
   br label %bb3
 
 bb3:

diff  --git a/llvm/test/Transforms/Inline/inline-cost-annotation-pass.ll b/llvm/test/Transforms/Inline/inline-cost-annotation-pass.ll
index 17b2e1581e6e7..5c07159822ca1 100644
--- a/llvm/test/Transforms/Inline/inline-cost-annotation-pass.ll
+++ b/llvm/test/Transforms/Inline/inline-cost-annotation-pass.ll
@@ -1,11 +1,11 @@
 ; RUN: opt < %s -passes="print<inline-cost>" 2>&1 | FileCheck %s
 
 ; CHECK:       Analyzing call of foo... (caller:main)
-; CHECK: define i8 addrspace(1)** @foo() {
+; CHECK: define ptr @foo() {
 ; CHECK:  cost before = {{.*}}, cost after = {{.*}}, threshold before = {{.*}}, threshold after = {{.*}}, cost delta = {{.*}}
-; CHECK:  %1 = inttoptr i64 754974720 to i8 addrspace(1)**
+; CHECK:  %1 = inttoptr i64 754974720 to ptr
 ; CHECK:  cost before = {{.*}}, cost after = {{.*}}, threshold before = {{.*}}, threshold after = {{.*}}, cost delta = {{.*}}
-; CHECK:  ret i8 addrspace(1)** %1
+; CHECK:  ret ptr %1
 ; CHECK: }
 ; CHECK:       NumConstantArgs: {{.*}}
 ; CHECK:       NumConstantOffsetPtrArgs: {{.*}}
@@ -23,13 +23,13 @@
 ; CHECK-EMPTY:
 ; CHECK:  Analyzing call of foo... (caller:main)
 
-define i8 addrspace(1)** @foo() {
-  %1 = inttoptr i64 754974720 to i8 addrspace(1)**
-  ret i8 addrspace(1)** %1
+define ptr @foo() {
+  %1 = inttoptr i64 754974720 to ptr
+  ret ptr %1
 }
 
-define i8 addrspace(1)** @main() {
-  %1 = call i8 addrspace(1)** @foo()
-  %2 = call i8 addrspace(1)** @foo()
-  ret i8 addrspace(1)** %1
+define ptr @main() {
+  %1 = call ptr @foo()
+  %2 = call ptr @foo()
+  ret ptr %1
 }

diff  --git a/llvm/test/Transforms/Inline/inline-cost-dead-users.ll b/llvm/test/Transforms/Inline/inline-cost-dead-users.ll
index 5dba308c5e28a..d7dbd234e428b 100644
--- a/llvm/test/Transforms/Inline/inline-cost-dead-users.ll
+++ b/llvm/test/Transforms/Inline/inline-cost-dead-users.ll
@@ -14,19 +14,19 @@
 %0 = type { i64, i64, i64 }
 %1 = type { i64, i64, i64 }
 
-define internal void @f(%0* align 8 %a) unnamed_addr {
+define internal void @f(ptr align 8 %a) unnamed_addr {
 start:
   ret void
 }
 
-define internal void @g(%0* align 8 %a) unnamed_addr {
+define internal void @g(ptr align 8 %a) unnamed_addr {
 start:
   ret void
 }
 
-define void @h(%0* align 8 %a, %1* align 8 %b) unnamed_addr {
+define void @h(ptr align 8 %a, ptr align 8 %b) unnamed_addr {
 start:
-  call void @f(%0* align 8 %a)
-  call void bitcast (void (%0*)* @g to void (%1*)*)(%1* align 8 %b)
+  call void @f(ptr align 8 %a)
+  call void @g(ptr align 8 %b)
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/inline-fast-math-flags.ll b/llvm/test/Transforms/Inline/inline-fast-math-flags.ll
index 0c446c1a5b4ff..11c4d2b934391 100644
--- a/llvm/test/Transforms/Inline/inline-fast-math-flags.ll
+++ b/llvm/test/Transforms/Inline/inline-fast-math-flags.ll
@@ -8,17 +8,17 @@
 
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 
-define float @foo(float* %a, float %b) {
+define float @foo(ptr %a, float %b) {
 entry:
-  %a0 = load float, float* %a, align 4
+  %a0 = load float, ptr %a, align 4
   %mul = fmul fast float %a0, %b
   %tobool = fcmp une float %mul, 0.000000e+00
   br i1 %tobool, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %a1 = load float, float* %a, align 8
-  %arrayidx1 = getelementptr inbounds float, float* %a, i64 1
-  %a2 = load float, float* %arrayidx1, align 4
+  %a1 = load float, ptr %a, align 8
+  %arrayidx1 = getelementptr inbounds float, ptr %a, i64 1
+  %a2 = load float, ptr %arrayidx1, align 4
   %add = fadd fast float %a1, %a2
   br label %if.end
 
@@ -29,8 +29,8 @@ if.end:                                           ; preds = %if.then, %entry
 
 ; CHECK-LABEL: @boo
 ; CHECK-NOT: call float @foo
-define float @boo(float* %a) {
+define float @boo(ptr %a) {
 entry:
-  %call = call float @foo(float* %a, float 0.000000e+00)
+  %call = call float @foo(ptr %a, float 0.000000e+00)
   ret float %call
 }

diff  --git a/llvm/test/Transforms/Inline/inline-funclets.ll b/llvm/test/Transforms/Inline/inline-funclets.ll
index dc67ffc33526e..fb4a94e7b07f2 100644
--- a/llvm/test/Transforms/Inline/inline-funclets.ll
+++ b/llvm/test/Transforms/Inline/inline-funclets.ll
@@ -7,7 +7,7 @@ declare void @g()
 ;;; Test with a call in a funclet that needs to remain a call
 ;;; when inlined because the funclet doesn't unwind to caller.
 ;;; CHECK-LABEL: define void @test1(
-define void @test1() personality void ()* @g {
+define void @test1() personality ptr @g {
 entry:
 ; CHECK-NEXT: entry:
   invoke void @test1_inlinee()
@@ -20,7 +20,7 @@ exit:
   ret void
 }
 
-define void @test1_inlinee() alwaysinline personality void ()* @g {
+define void @test1_inlinee() alwaysinline personality ptr @g {
 entry:
   invoke void @g()
     to label %exit unwind label %cleanup.inner
@@ -59,7 +59,7 @@ exit:
 ;;; that needs to remain "unwind to caller" because the parent
 ;;; doesn't unwind to caller.
 ;;; CHECK-LABEL: define void @test2(
-define void @test2() personality void ()* @g {
+define void @test2() personality ptr @g {
 entry:
 ; CHECK-NEXT: entry:
   invoke void @test2_inlinee()
@@ -72,7 +72,7 @@ exit:
   ret void
 }
 
-define void @test2_inlinee() alwaysinline personality void ()* @g {
+define void @test2_inlinee() alwaysinline personality ptr @g {
 entry:
   invoke void @g()
     to label %exit unwind label %cleanup1
@@ -124,7 +124,7 @@ exit:
 ;;; Test with a call in a cleanup that has no definitive unwind
 ;;; destination, that must be rewritten to an invoke.
 ;;; CHECK-LABEL: define void @test3(
-define void @test3() personality void ()* @g {
+define void @test3() personality ptr @g {
 entry:
 ; CHECK-NEXT: entry:
   invoke void @test3_inlinee()
@@ -137,7 +137,7 @@ exit:
   ret void
 }
 
-define void @test3_inlinee() alwaysinline personality void ()* @g {
+define void @test3_inlinee() alwaysinline personality ptr @g {
 entry:
   invoke void @g()
     to label %exit unwind label %cleanup
@@ -164,7 +164,7 @@ exit:
 ;;; unwind destination, that must be rewritten to unwind to the
 ;;; inlined invoke's unwind dest
 ;;; CHECK-LABEL: define void @test4(
-define void @test4() personality void ()* @g {
+define void @test4() personality ptr @g {
 entry:
 ; CHECK-NEXT: entry:
   invoke void @test4_inlinee()
@@ -177,7 +177,7 @@ exit:
   ret void
 }
 
-define void @test4_inlinee() alwaysinline personality void ()* @g {
+define void @test4_inlinee() alwaysinline personality ptr @g {
 entry:
   invoke void @g()
     to label %exit unwind label %cleanup
@@ -214,7 +214,7 @@ exit:
 ;;; that need to be inferred from ancestors, descendants,
 ;;; and cousins.
 ;;; CHECK-LABEL: define void @test5(
-define void @test5() personality void ()* @g {
+define void @test5() personality ptr @g {
 entry:
 ; CHECK-NEXT: entry:
   invoke void @test5_inlinee()
@@ -227,7 +227,7 @@ exit:
   ret void
 }
 
-define void @test5_inlinee() alwaysinline personality void ()* @g {
+define void @test5_inlinee() alwaysinline personality ptr @g {
 entry:
   invoke void @g()
     to label %cont unwind label %noinfo.root
@@ -416,7 +416,7 @@ exit:
 ;;; unwinds don't trip up processing of the ancestor nodes (left and root) that
 ;;; ultimately have no information.
 ;;; CHECK-LABEL: define void @test6(
-define void @test6() personality void()* @ProcessCLRException {
+define void @test6() personality ptr @ProcessCLRException {
 entry:
 ; CHECK-NEXT: entry:
   invoke void @test6_inlinee()
@@ -429,7 +429,7 @@ exit:
   ret void
 }
 
-define void @test6_inlinee() alwaysinline personality void ()* @ProcessCLRException {
+define void @test6_inlinee() alwaysinline personality ptr @ProcessCLRException {
 entry:
   invoke void @g()
     to label %exit unwind label %root
@@ -531,7 +531,7 @@ exit:
 ;;; unwinds to another cousin (left.right); make sure we don't trip over this
 ;;; when propagating unwind destination info to "right".
 ;;; CHECK-LABEL: define void @test7(
-define void @test7() personality void()* @ProcessCLRException {
+define void @test7() personality ptr @ProcessCLRException {
 entry:
 ; CHECK-NEXT: entry:
   invoke void @test7_inlinee()
@@ -544,7 +544,7 @@ exit:
   ret void
 }
 
-define void @test7_inlinee() alwaysinline personality void ()* @ProcessCLRException {
+define void @test7_inlinee() alwaysinline personality ptr @ProcessCLRException {
 entry:
   invoke void @g()
     to label %exit unwind label %root
@@ -636,7 +636,7 @@ declare void @ProcessCLRException()
 ; Make sure the logic doesn't get tripped up when the inlined invoke is
 ; itself within a funclet in the caller.
 ; CHECK-LABEL: define void @test8(
-define void @test8() personality void ()* @ProcessCLRException {
+define void @test8() personality ptr @ProcessCLRException {
 entry:
   invoke void @g()
     to label %exit unwind label %callsite_parent
@@ -655,7 +655,7 @@ exit:
   ret void
 }
 
-define void @test8_inlinee() alwaysinline personality void ()* @ProcessCLRException {
+define void @test8_inlinee() alwaysinline personality ptr @ProcessCLRException {
 entry:
   invoke void @g()
     to label %exit unwind label %inlinee_cleanup

diff  --git a/llvm/test/Transforms/Inline/inline-hot-callsite.ll b/llvm/test/Transforms/Inline/inline-hot-callsite.ll
index 5344d6d92ae78..f5877cb2c4584 100644
--- a/llvm/test/Transforms/Inline/inline-hot-callsite.ll
+++ b/llvm/test/Transforms/Inline/inline-hot-callsite.ll
@@ -40,7 +40,7 @@ define i32 @caller2(i32 %y1) {
 
 declare i32 @__gxx_personality_v0(...)
 
-define i32 @invoker2(i32 %y1) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @invoker2(i32 %y1) personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: @invoker2(
 ; CHECK: invoke i32 @callee2
 ; CHECK-NOT: invoke i32 @callee1
@@ -54,11 +54,11 @@ exit:
   ret i32 1
 
 lpad:
-  %ll = landingpad { i8*, i32 } cleanup
+  %ll = landingpad { ptr, i32 } cleanup
   ret i32 1
 }
 
-define i32 @invoker3(i32 %y1) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @invoker3(i32 %y1) personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: @invoker3(
 ; CHECK: invoke i32 @callee2
 ; CHECK-NOT: invoke i32 @callee1
@@ -74,11 +74,11 @@ exit:
   ret i32 1
 
 lpad:
-  %ll = landingpad { i8*, i32 } cleanup
+  %ll = landingpad { ptr, i32 } cleanup
   ret i32 1
 }
 
-define i32 @invoker4(i32 %y1) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @invoker4(i32 %y1) personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: @invoker4(
 ; CHECK: invoke i32 @callee2
 ; CHECK-NOT: invoke i32 @callee1
@@ -94,7 +94,7 @@ exit:
   ret i32 1
 
 lpad:
-  %ll = landingpad { i8*, i32 } cleanup
+  %ll = landingpad { ptr, i32 } cleanup
   ret i32 1
 }
 

diff  --git a/llvm/test/Transforms/Inline/inline-indirect-chain.ll b/llvm/test/Transforms/Inline/inline-indirect-chain.ll
index 4377c32ab868f..4083ad6d30cae 100644
--- a/llvm/test/Transforms/Inline/inline-indirect-chain.ll
+++ b/llvm/test/Transforms/Inline/inline-indirect-chain.ll
@@ -2,20 +2,17 @@
 ; This test used to crash (PR35469).
 
 define void @func1() {
-  %t = bitcast void ()* @func2 to void ()*
-  tail call void %t()
+  tail call void @func2()
   ret void
 }
 
 define void @func2() {
-  %t = bitcast void ()* @func3 to void ()*
-  tail call void %t()
+  tail call void @func3()
   ret void
 }
 
 define void @func3() {
-  %t = bitcast void ()* @func4 to void ()*
-  tail call void %t()
+  tail call void @func4()
   ret void
 }
 
@@ -23,8 +20,7 @@ define void @func4() {
   br i1 undef, label %left, label %right
 
 left:
-  %t = bitcast void ()* @func5 to void ()*
-  tail call void %t()
+  tail call void @func5()
   ret void
 
 right:
@@ -32,24 +28,21 @@ right:
 }
 
 define void @func5() {
-  %t = bitcast void ()* @func6 to void ()*
-  tail call void %t()
+  tail call void @func6()
   ret void
 }
 
 define void @func6() {
-  %t = bitcast void ()* @func2 to void ()*
-  tail call void %t()
+  tail call void @func2()
   ret void
 }
 
 define void @func7() {
-  %t = bitcast void ()* @func3 to void ()*
-  tail call void @func8(void()* %t)
+  tail call void @func8(ptr @func3)
   ret void
 }
 
-define void @func8(void()* %f) {
+define void @func8(ptr %f) {
   tail call void %f()
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/inline-indirect.ll b/llvm/test/Transforms/Inline/inline-indirect.ll
index c8f01b4dd30f4..1f0ef41e5da9e 100644
--- a/llvm/test/Transforms/Inline/inline-indirect.ll
+++ b/llvm/test/Transforms/Inline/inline-indirect.ll
@@ -7,13 +7,13 @@ define void @foo() {
   ret void
 }
 
-define void @bar(void ()*) {
+define void @bar(ptr) {
   call void @llvm.assume(i1 true)
   call void %0();
   ret void
 }
 
 define void @baz() {
-  call void @bar(void ()* @foo)
+  call void @bar(ptr @foo)
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/inline-invoke-tail.ll b/llvm/test/Transforms/Inline/inline-invoke-tail.ll
index 9e77cd6edcf41..553286459bf1e 100644
--- a/llvm/test/Transforms/Inline/inline-invoke-tail.ll
+++ b/llvm/test/Transforms/Inline/inline-invoke-tail.ll
@@ -1,40 +1,38 @@
-; RUN: opt < %s -passes=inline -S | not grep "tail call void @llvm.memcpy.p0i8.p0i8.i32"
+; RUN: opt < %s -passes=inline -S | not grep "tail call void @llvm.memcpy.p0.p0.i32"
 ; PR3550
 
-define internal void @foo(i32* %p, i32* %q) {
+define internal void @foo(ptr %p, ptr %q) {
 ; CHECK-NOT: @foo
 entry:
-  %pp = bitcast i32* %p to i8*
-  %qq = bitcast i32* %q to i8*
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %pp, i8* %qq, i32 4, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i32(ptr %p, ptr %q, i32 4, i1 false)
   ret void
 }
 
-define i32 @main() personality i32 (...)* @__gxx_personality_v0 {
-; CHECK-LABEL: define i32 @main() personality i32 (...)* @__gxx_personality_v0
+define i32 @main() personality ptr @__gxx_personality_v0 {
+; CHECK-LABEL: define i32 @main() personality ptr @__gxx_personality_v0
 entry:
   %a = alloca i32
   %b = alloca i32
-  store i32 1, i32* %a, align 4
-  store i32 0, i32* %b, align 4
-  invoke void @foo(i32* %a, i32* %b)
+  store i32 1, ptr %a, align 4
+  store i32 0, ptr %b, align 4
+  invoke void @foo(ptr %a, ptr %b)
       to label %invcont unwind label %lpad
 ; CHECK-NOT: invoke
 ; CHECK-NOT: @foo
 ; CHECK-NOT: tail
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i32
+; CHECK: call void @llvm.memcpy.p0.p0.i32
 ; CHECK: br
 
 invcont:
-  %retval = load i32, i32* %a, align 4
+  %retval = load i32, ptr %a, align 4
   ret i32 %retval
 
 lpad:
-  %exn = landingpad {i8*, i32}
-         catch i8* null
+  %exn = landingpad {ptr, i32}
+         catch ptr null
   unreachable
 }
 
 declare i32 @__gxx_personality_v0(...)
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind

diff  --git a/llvm/test/Transforms/Inline/inline-invoke-with-asm-call.ll b/llvm/test/Transforms/Inline/inline-invoke-with-asm-call.ll
index da767f109f2ab..51abbc74a5314 100644
--- a/llvm/test/Transforms/Inline/inline-invoke-with-asm-call.ll
+++ b/llvm/test/Transforms/Inline/inline-invoke-with-asm-call.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-darwin"
 ; Make sure we are generating "call asm" instead of "invoke asm".
 ; CHECK: call void asm
 ; CHECK-LABEL: @callee_with_asm
-define void @caller() personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*) {
+define void @caller() personality ptr @__objc_personality_v0 {
   br i1 undef, label %1, label %4
 
 ; <label>:1
@@ -17,9 +17,9 @@ define void @caller() personality i8* bitcast (i32 (...)* @__objc_personality_v0
           to label %4 unwind label %2
 
 ; <label>:2
-  %3 = landingpad { i8*, i32 }
+  %3 = landingpad { ptr, i32 }
           cleanup
-  resume { i8*, i32 } undef
+  resume { ptr, i32 } undef
 
 ; <label>:4
   ret void

diff  --git a/llvm/test/Transforms/Inline/inline-optsize.ll b/llvm/test/Transforms/Inline/inline-optsize.ll
index c7cd9b3189d19..0395b6a69d959 100644
--- a/llvm/test/Transforms/Inline/inline-optsize.ll
+++ b/llvm/test/Transforms/Inline/inline-optsize.ll
@@ -13,15 +13,15 @@
 ; smaller than the inline threshold for optsize (75).
 define i32 @inner() {
   call void @extern()
-  %a1 = load volatile i32, i32* @a
+  %a1 = load volatile i32, ptr @a
   %x1 = add i32 %a1,  %a1
-  %a2 = load volatile i32, i32* @a
+  %a2 = load volatile i32, ptr @a
   %x2 = add i32 %x1, %a2
-  %a3 = load volatile i32, i32* @a
+  %a3 = load volatile i32, ptr @a
   %x3 = add i32 %x2, %a3
-  %a4 = load volatile i32, i32* @a
+  %a4 = load volatile i32, ptr @a
   %x4 = add i32 %x3, %a4
-  %a5 = load volatile i32, i32* @a
+  %a5 = load volatile i32, ptr @a
   %x5 = add i32 %x3, %a5
   ret i32 %x5
 }

diff  --git a/llvm/test/Transforms/Inline/inline-ptrtoint-
diff erent-sizes.ll b/llvm/test/Transforms/Inline/inline-ptrtoint-
diff erent-sizes.ll
index 5bcc9669a0beb..c4fab0fc3f421 100644
--- a/llvm/test/Transforms/Inline/inline-ptrtoint-
diff erent-sizes.ll
+++ b/llvm/test/Transforms/Inline/inline-ptrtoint-
diff erent-sizes.ll
@@ -7,34 +7,34 @@
 target datalayout = "p:16:16"
 target triple = "x86_64-unknown-linux-gnu"
 
-define void @pr47969_help(i16* %p) {
-  %cast = ptrtoint i16* %p to i32
+define void @pr47969_help(ptr %p) {
+  %cast = ptrtoint ptr %p to i32
   %sub = sub i32 %cast, %cast
   %conv = trunc i32 %sub to i16
   ret void
 }
 
-define void @pr47969(i16* %x) {
-  call void @pr47969_help(i16* %x)
+define void @pr47969(ptr %x) {
+  call void @pr47969_help(ptr %x)
   ret void
 }
 
-; CHECK-LABEL: @pr47969(i16* %x)
+; CHECK-LABEL: @pr47969(ptr %x)
 ; CHECK-NOT:     call
 ; CHECK:         ret void
 
-define void @pr38500_help(i16* %p) {
-  %cast = ptrtoint i16* %p to i32
+define void @pr38500_help(ptr %p) {
+  %cast = ptrtoint ptr %p to i32
   %sub = sub i32 %cast, %cast
   %cmp = icmp eq i32 %sub, 0
   ret void
 }
 
-define void @pr38500(i16* %x) {
-  call void @pr38500_help(i16* %x)
+define void @pr38500(ptr %x) {
+  call void @pr38500_help(ptr %x)
   ret void
 }
 
-; CHECK-LABEL: @pr38500(i16* %x)
+; CHECK-LABEL: @pr38500(ptr %x)
 ; CHECK-NOT:     call
 ; CHECK:         ret void

diff  --git a/llvm/test/Transforms/Inline/inline-recur-stacksize.ll b/llvm/test/Transforms/Inline/inline-recur-stacksize.ll
index c68fa82ec6c74..608075807b237 100644
--- a/llvm/test/Transforms/Inline/inline-recur-stacksize.ll
+++ b/llvm/test/Transforms/Inline/inline-recur-stacksize.ll
@@ -2,14 +2,13 @@
 ; RUN: opt < %s -passes=inline -S | FileCheck --check-prefixes=ALL,UNLIMITED %s
 ; RUN: opt < %s -passes=inline -S -recursive-inline-max-stacksize=256 | FileCheck --check-prefixes=ALL,LIMITED %s
 
-declare void @init([65 x i32]*)
+declare void @init(ptr)
 
 define internal i32 @foo() {
   %1 = alloca [65 x i32], align 16
-  %2 = getelementptr inbounds [65 x i32], [65 x i32]* %1, i65 0, i65 0
-  call void @init([65 x i32]* %1)
-  %3 = load i32, i32* %2, align 4
-  ret i32 %3
+  call void @init(ptr %1)
+  %2 = load i32, ptr %1, align 4
+  ret i32 %2
 }
 
 define i32 @bar() {

diff  --git a/llvm/test/Transforms/Inline/inline-remark-mandatory.ll b/llvm/test/Transforms/Inline/inline-remark-mandatory.ll
index 1b4a022a214f5..168844e86ff74 100644
--- a/llvm/test/Transforms/Inline/inline-remark-mandatory.ll
+++ b/llvm/test/Transforms/Inline/inline-remark-mandatory.ll
@@ -3,11 +3,11 @@
 declare void @personalityFn1();
 declare void @personalityFn2();
 
-define i32 @a() personality void ()* @personalityFn1 {
+define i32 @a() personality ptr @personalityFn1 {
     ret i32 1
 }
 
-define i32 @b() personality void ()* @personalityFn2 {
+define i32 @b() personality ptr @personalityFn2 {
     %r = call i32 @a() alwaysinline
     ret i32 %r
 }

diff  --git a/llvm/test/Transforms/Inline/inline-remark.ll b/llvm/test/Transforms/Inline/inline-remark.ll
index 49e597b391699..166c7bb065924 100644
--- a/llvm/test/Transforms/Inline/inline-remark.ll
+++ b/llvm/test/Transforms/Inline/inline-remark.ll
@@ -37,7 +37,7 @@ define void @noop() {
 }
 
 ;; Test 2 - Printed InlineResult messages are followed by InlineCost.
-define void @test2(i8*) {
+define void @test2(ptr) {
 ; CHECK-LABEL: @test2
 ; CHECK-NEXT: call void @noop() [[ATTR3:#[0-9]+]] [ "CUSTOM_OPERAND_BUNDLE"() ]
 ; CHECK-NEXT: ret void

diff  --git a/llvm/test/Transforms/Inline/inline-retainRV-call.ll b/llvm/test/Transforms/Inline/inline-retainRV-call.ll
index d757bd0326792..87b19a70b0a5f 100644
--- a/llvm/test/Transforms/Inline/inline-retainRV-call.ll
+++ b/llvm/test/Transforms/Inline/inline-retainRV-call.ll
@@ -1,150 +1,150 @@
 ; RUN: opt < %s -passes=inline -S | FileCheck %s
 
- at g0 = global i8* null, align 8
-declare i8* @foo0()
+ at g0 = global ptr null, align 8
+declare ptr @foo0()
 
-define i8* @callee0_autoreleaseRV() {
-  %call = call i8* @foo0() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
-  %1 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %call)
-  ret i8* %call
+define ptr @callee0_autoreleaseRV() {
+  %call = call ptr @foo0() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+  %1 = tail call ptr @llvm.objc.autoreleaseReturnValue(ptr %call)
+  ret ptr %call
 }
 
 ; CHECK-LABEL: define void @test0_autoreleaseRV(
-; CHECK: call i8* @foo0() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+; CHECK: call ptr @foo0() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
 
 define void @test0_autoreleaseRV() {
-  %call = call i8* @callee0_autoreleaseRV() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+  %call = call ptr @callee0_autoreleaseRV() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
   ret void
 }
 
 ; CHECK-LABEL: define void @test0_claimRV_autoreleaseRV(
-; CHECK: %[[CALL:.*]] = call i8* @foo0() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
-; CHECK: call void @llvm.objc.release(i8* %[[CALL]])
+; CHECK: %[[CALL:.*]] = call ptr @foo0() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+; CHECK: call void @llvm.objc.release(ptr %[[CALL]])
 ; CHECK-NEXT: ret void
 
 define void @test0_claimRV_autoreleaseRV() {
-  %call = call i8* @callee0_autoreleaseRV() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.unsafeClaimAutoreleasedReturnValue) ]
+  %call = call ptr @callee0_autoreleaseRV() [ "clang.arc.attachedcall"(ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue) ]
   ret void
 }
 
 ; CHECK-LABEL: define void @test1_autoreleaseRV(
-; CHECK: invoke i8* @foo0() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+; CHECK: invoke ptr @foo0() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
 
-define void @test1_autoreleaseRV() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @test1_autoreleaseRV() personality ptr @__gxx_personality_v0 {
 entry:
-  %call = invoke i8* @callee0_autoreleaseRV() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+  %call = invoke ptr @callee0_autoreleaseRV() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
           to label %invoke.cont unwind label %lpad
 
 invoke.cont:
   ret void
 
 lpad:
-  %0 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
-  resume { i8*, i32 } undef
+  resume { ptr, i32 } undef
 }
 
 ; CHECK-LABEL: define void @test1_claimRV_autoreleaseRV(
-; CHECK: %[[INVOKE:.*]] = invoke i8* @foo0() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
-; CHECK: call void @llvm.objc.release(i8* %[[INVOKE]])
+; CHECK: %[[INVOKE:.*]] = invoke ptr @foo0() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+; CHECK: call void @llvm.objc.release(ptr %[[INVOKE]])
 ; CHECK-NEXT: br
 
-define void @test1_claimRV_autoreleaseRV() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @test1_claimRV_autoreleaseRV() personality ptr @__gxx_personality_v0 {
 entry:
-  %call = invoke i8* @callee0_autoreleaseRV() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.unsafeClaimAutoreleasedReturnValue) ]
+  %call = invoke ptr @callee0_autoreleaseRV() [ "clang.arc.attachedcall"(ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue) ]
           to label %invoke.cont unwind label %lpad
 
 invoke.cont:
   ret void
 
 lpad:
-  %0 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
-  resume { i8*, i32 } undef
+  resume { ptr, i32 } undef
 }
 
-define i8* @callee1_no_autoreleaseRV() {
-  %call = call i8* @foo0()
-  ret i8* %call
+define ptr @callee1_no_autoreleaseRV() {
+  %call = call ptr @foo0()
+  ret ptr %call
 }
 
 ; CHECK-LABEL: define void @test2_no_autoreleaseRV(
-; CHECK: call i8* @foo0() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+; CHECK: call ptr @foo0() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
 ; CHECK-NEXT: ret void
 
 define void @test2_no_autoreleaseRV() {
-  %call = call i8* @callee1_no_autoreleaseRV() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+  %call = call ptr @callee1_no_autoreleaseRV() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
   ret void
 }
 
 ; CHECK-LABEL: define void @test2_claimRV_no_autoreleaseRV(
-; CHECK: call i8* @foo0() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.unsafeClaimAutoreleasedReturnValue) ]
+; CHECK: call ptr @foo0() [ "clang.arc.attachedcall"(ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue) ]
 ; CHECK-NEXT: ret void
 
 define void @test2_claimRV_no_autoreleaseRV() {
-  %call = call i8* @callee1_no_autoreleaseRV() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.unsafeClaimAutoreleasedReturnValue) ]
+  %call = call ptr @callee1_no_autoreleaseRV() [ "clang.arc.attachedcall"(ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue) ]
   ret void
 }
 
 ; CHECK-LABEL: define void @test3_no_autoreleaseRV(
-; CHECK: invoke i8* @foo0() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+; CHECK: invoke ptr @foo0() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
 
-define void @test3_no_autoreleaseRV() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @test3_no_autoreleaseRV() personality ptr @__gxx_personality_v0 {
 entry:
-  %call = invoke i8* @callee1_no_autoreleaseRV() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+  %call = invoke ptr @callee1_no_autoreleaseRV() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
           to label %invoke.cont unwind label %lpad
 
 invoke.cont:
   ret void
 
 lpad:
-  %0 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
-  resume { i8*, i32 } undef
+  resume { ptr, i32 } undef
 }
 
-define i8* @callee2_nocall() {
-  %1 = load i8*, i8** @g0, align 8
-  ret i8* %1
+define ptr @callee2_nocall() {
+  %1 = load ptr, ptr @g0, align 8
+  ret ptr %1
 }
 
 ; Check that a call to @llvm.objc.retain is inserted if there is no matching
 ; autoreleaseRV call or a call.
 
 ; CHECK-LABEL: define void @test4_nocall(
-; CHECK: %[[V0:.*]] = load i8*, i8** @g0,
-; CHECK-NEXT: call i8* @llvm.objc.retain(i8* %[[V0]])
+; CHECK: %[[V0:.*]] = load ptr, ptr @g0,
+; CHECK-NEXT: call ptr @llvm.objc.retain(ptr %[[V0]])
 ; CHECK-NEXT: ret void
 
 define void @test4_nocall() {
-  %call = call i8* @callee2_nocall() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+  %call = call ptr @callee2_nocall() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
   ret void
 }
 
 ; CHECK-LABEL: define void @test4_claimRV_nocall(
-; CHECK: %[[V0:.*]] = load i8*, i8** @g0,
+; CHECK: %[[V0:.*]] = load ptr, ptr @g0,
 ; CHECK-NEXT: ret void
 
 define void @test4_claimRV_nocall() {
-  %call = call i8* @callee2_nocall() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.unsafeClaimAutoreleasedReturnValue) ]
+  %call = call ptr @callee2_nocall() [ "clang.arc.attachedcall"(ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue) ]
   ret void
 }
 
 ; Check that a call to @llvm.objc.retain is inserted if call to @foo already has
 ; the attribute. I'm not sure this will happen in practice.
 
-define i8* @callee3_marker() {
-  %1 = call i8* @foo0() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
-  ret i8* %1
+define ptr @callee3_marker() {
+  %1 = call ptr @foo0() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+  ret ptr %1
 }
 
 ; CHECK-LABEL: define void @test5(
-; CHECK: %[[V0:.*]] = call i8* @foo0() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
-; CHECK-NEXT: call i8* @llvm.objc.retain(i8* %[[V0]])
+; CHECK: %[[V0:.*]] = call ptr @foo0() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+; CHECK-NEXT: call ptr @llvm.objc.retain(ptr %[[V0]])
 ; CHECK-NEXT: ret void
 
 define void @test5() {
-  %call = call i8* @callee3_marker() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+  %call = call ptr @callee3_marker() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
   ret void
 }
 
@@ -152,26 +152,26 @@ define void @test5() {
 ; if there is an instruction between the ret instruction and the call to
 ; autoreleaseRV that isn't a cast instruction.
 
-define i8* @callee0_autoreleaseRV2() {
-  %call = call i8* @foo0() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
-  %1 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %call)
-  store i8* null, i8** @g0
-  ret i8* %call
+define ptr @callee0_autoreleaseRV2() {
+  %call = call ptr @foo0() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+  %1 = tail call ptr @llvm.objc.autoreleaseReturnValue(ptr %call)
+  store ptr null, ptr @g0
+  ret ptr %call
 }
 
 ; CHECK-LABEL: define void @test6(
-; CHECK: %[[V0:.*]] = call i8* @foo0() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
-; CHECK: call i8* @llvm.objc.autoreleaseReturnValue(i8* %[[V0]])
-; CHECK: store i8* null, i8** @g0, align 8
-; CHECK: call i8* @llvm.objc.retain(i8* %[[V0]])
+; CHECK: %[[V0:.*]] = call ptr @foo0() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+; CHECK: call ptr @llvm.objc.autoreleaseReturnValue(ptr %[[V0]])
+; CHECK: store ptr null, ptr @g0, align 8
+; CHECK: call ptr @llvm.objc.retain(ptr %[[V0]])
 ; CHECK-NEXT: ret void
 
 define void @test6() {
-  %call = call i8* @callee0_autoreleaseRV2() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+  %call = call ptr @callee0_autoreleaseRV2() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
   ret void
 }
 
-declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
-declare i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8*)
-declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare ptr @llvm.objc.retainAutoreleasedReturnValue(ptr)
+declare ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr)
+declare ptr @llvm.objc.autoreleaseReturnValue(ptr)
 declare i32 @__gxx_personality_v0(...)

diff  --git a/llvm/test/Transforms/Inline/inline-stacksize.ll b/llvm/test/Transforms/Inline/inline-stacksize.ll
index f2fef0c00d5b4..5c1d2a4d33560 100644
--- a/llvm/test/Transforms/Inline/inline-stacksize.ll
+++ b/llvm/test/Transforms/Inline/inline-stacksize.ll
@@ -2,14 +2,13 @@
 ; RUN: opt < %s -passes=inline -S | FileCheck --check-prefixes=ALL,UNLIMITED %s
 ; RUN: opt < %s -passes=inline -S -inline-max-stacksize=256 | FileCheck --check-prefixes=ALL,LIMITED %s
 
-declare void @init([65 x i32]*)
+declare void @init(ptr)
 
 define internal i32 @foo() {
   %1 = alloca [65 x i32], align 16
-  %2 = getelementptr inbounds [65 x i32], [65 x i32]* %1, i65 0, i65 0
-  call void @init([65 x i32]* %1)
-  %3 = load i32, i32* %2, align 4
-  ret i32 %3
+  call void @init(ptr %1)
+  %2 = load i32, ptr %1, align 4
+  ret i32 %2
 }
 
 define i32 @barNoAttr() {

diff  --git a/llvm/test/Transforms/Inline/inline-threshold.ll b/llvm/test/Transforms/Inline/inline-threshold.ll
index d2b9c6671af09..8d9bca1487b2d 100644
--- a/llvm/test/Transforms/Inline/inline-threshold.ll
+++ b/llvm/test/Transforms/Inline/inline-threshold.ll
@@ -8,7 +8,7 @@
 
 define i32 @simpleFunction(i32 %a) #0 {
 entry:
-  %a1 = load volatile i32, i32* @a
+  %a1 = load volatile i32, ptr @a
   %x1 = add i32 %a1,  %a
   ret i32 %x1
 }

diff  --git a/llvm/test/Transforms/Inline/inline-varargs.ll b/llvm/test/Transforms/Inline/inline-varargs.ll
index a3f85936eaa02..d2073081e79ae 100644
--- a/llvm/test/Transforms/Inline/inline-varargs.ll
+++ b/llvm/test/Transforms/Inline/inline-varargs.ll
@@ -1,76 +1,74 @@
 ; RUN: opt < %s -passes=inline -S | FileCheck %s
 ; RUN: opt < %s -passes='cgscc(inline,function(instcombine))' -S | FileCheck %s
 
-declare void @ext_method(i8*, i32)
+declare void @ext_method(ptr, i32)
 declare signext i16 @vararg_fn(...) #0
-declare "cc 9" void @vararg_fn_cc9(i8* %p, ...)
+declare "cc 9" void @vararg_fn_cc9(ptr %p, ...)
 
-define linkonce_odr void @thunk(i8* %this, ...) {
-  %this_adj = getelementptr i8, i8* %this, i32 4
-  musttail call void (i8*, ...) bitcast (void (i8*, i32)* @ext_method to void (i8*, ...)*)(i8* nonnull %this_adj, ...)
+define linkonce_odr void @thunk(ptr %this, ...) {
+  %this_adj = getelementptr i8, ptr %this, i32 4
+  musttail call void (ptr, ...) @ext_method(ptr nonnull %this_adj, ...)
   ret void
 }
 
-define void @thunk_caller(i8* %p) {
-  call void (i8*, ...) @thunk(i8* %p, i32 42)
+define void @thunk_caller(ptr %p) {
+  call void (ptr, ...) @thunk(ptr %p, i32 42)
   ret void
 }
-; CHECK-LABEL: define void @thunk_caller(i8* %p)
-; CHECK: call void (i8*, ...) bitcast (void (i8*, i32)* @ext_method to void (i8*, ...)*)(i8* nonnull %this_adj.i, i32 42)
+; CHECK-LABEL: define void @thunk_caller(ptr %p)
+; CHECK: call void (ptr, ...) @ext_method(ptr nonnull %this_adj.i, i32 42)
 
 define signext i16 @test_callee_2(...) {
   %res = musttail call signext i16 (...) @vararg_fn(...) #0
   ret i16 %res
 }
 
-define void @test_caller_2(i8* %p, i8* %q, i16 %r) {
-  call signext i16 (...) @test_callee_2(i8* %p, i8* byval(i8) %q, i16 signext %r)
+define void @test_caller_2(ptr %p, ptr %q, i16 %r) {
+  call signext i16 (...) @test_callee_2(ptr %p, ptr byval(i8) %q, i16 signext %r)
   ret void
 }
 ; CHECK-LABEL: define void @test_caller_2
-; CHECK: call signext i16 (...) @vararg_fn(i8* %p, i8* byval(i8) %q, i16 signext %r) [[FN_ATTRS:#[0-9]+]]
+; CHECK: call signext i16 (...) @vararg_fn(ptr %p, ptr byval(i8) %q, i16 signext %r) [[FN_ATTRS:#[0-9]+]]
 
-define void @test_callee_3(i8* %p, ...) {
+define void @test_callee_3(ptr %p, ...) {
   call signext i16 (...) @vararg_fn()
   ret void
 }
 
-define void @test_caller_3(i8* %p, i8* %q) {
-  call void (i8*, ...) @test_callee_3(i8* nonnull %p, i8* %q)
+define void @test_caller_3(ptr %p, ptr %q) {
+  call void (ptr, ...) @test_callee_3(ptr nonnull %p, ptr %q)
   ret void
 }
 ; CHECK-LABEL: define void @test_caller_3
 ; CHECK: call signext i16 (...) @vararg_fn()
 
-define void @test_preserve_cc(i8* %p, ...) {
-  musttail call "cc 9" void (i8*, ...) @vararg_fn_cc9(i8* %p, ...)
+define void @test_preserve_cc(ptr %p, ...) {
+  musttail call "cc 9" void (ptr, ...) @vararg_fn_cc9(ptr %p, ...)
   ret void
 }
 
-define void @test_caller_preserve_cc(i8* %p, i8* %q) {
-  call void (i8*, ...) @test_preserve_cc(i8* %p, i8* %q)
+define void @test_caller_preserve_cc(ptr %p, ptr %q) {
+  call void (ptr, ...) @test_preserve_cc(ptr %p, ptr %q)
   ret void
 }
 ; CHECK-LABEL: define void @test_caller_preserve_cc
-; CHECK: call "cc 9" void (i8*, ...) @vararg_fn_cc9(i8* %p, i8* %q)
+; CHECK: call "cc 9" void (ptr, ...) @vararg_fn_cc9(ptr %p, ptr %q)
 
 define internal i32 @varg_accessed(...) {
 entry:
-  %vargs = alloca i8*, align 8
-  %vargs.ptr = bitcast i8** %vargs to i8*
-  call void @llvm.va_start(i8* %vargs.ptr)
-  %va1 = va_arg i8** %vargs, i32
-  call void @llvm.va_end(i8* %vargs.ptr)
+  %vargs = alloca ptr, align 8
+  call void @llvm.va_start(ptr %vargs)
+  %va1 = va_arg ptr %vargs, i32
+  call void @llvm.va_end(ptr %vargs)
   ret i32 %va1
 }
 
 define internal i32 @varg_accessed_alwaysinline(...) alwaysinline {
 entry:
-  %vargs = alloca i8*, align 8
-  %vargs.ptr = bitcast i8** %vargs to i8*
-  call void @llvm.va_start(i8* %vargs.ptr)
-  %va1 = va_arg i8** %vargs, i32
-  call void @llvm.va_end(i8* %vargs.ptr)
+  %vargs = alloca ptr, align 8
+  call void @llvm.va_start(ptr %vargs)
+  %va1 = va_arg ptr %vargs, i32
+  call void @llvm.va_end(ptr %vargs)
   ret i32 %va1
 }
 
@@ -84,28 +82,26 @@ define i32 @call_vargs() {
 ; CHECK: %res1 = call i32 (...) @varg_accessed(i32 10)
 ; CHECK-NEXT: %res2 = call i32 (...) @varg_accessed_alwaysinline(i32 15)
 
-define void @caller_with_vastart(i8* noalias nocapture readnone %args, ...) {
+define void @caller_with_vastart(ptr noalias nocapture readnone %args, ...) {
 entry:
-  %ap = alloca i8*, align 4
-  %ap.ptr = bitcast i8** %ap to i8*
-  %ap2 = alloca i8*, align 4
-  %ap2.ptr = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* nonnull %ap.ptr)
-  call fastcc void @callee_with_vaend(i8* nonnull %ap.ptr)
-  call void @llvm.va_start(i8* nonnull %ap2.ptr)
-  call fastcc void @callee_with_vaend_alwaysinline(i8* nonnull %ap2.ptr)
+  %ap = alloca ptr, align 4
+  %ap2 = alloca ptr, align 4
+  call void @llvm.va_start(ptr nonnull %ap)
+  call fastcc void @callee_with_vaend(ptr nonnull %ap)
+  call void @llvm.va_start(ptr nonnull %ap)
+  call fastcc void @callee_with_vaend_alwaysinline(ptr nonnull %ap)
   ret void
 }
 
-define internal fastcc void @callee_with_vaend_alwaysinline(i8* %a) alwaysinline {
+define internal fastcc void @callee_with_vaend_alwaysinline(ptr %a) alwaysinline {
 entry:
-  tail call void @llvm.va_end(i8* %a)
+  tail call void @llvm.va_end(ptr %a)
   ret void
 }
 
-define internal fastcc void @callee_with_vaend(i8* %a) {
+define internal fastcc void @callee_with_vaend(ptr %a) {
 entry:
-  tail call void @llvm.va_end(i8* %a)
+  tail call void @llvm.va_end(ptr %a)
   ret void
 }
 
@@ -113,8 +109,8 @@ entry:
 ; CHECK-NOT: @callee_with_vaend
 ; CHECK-NOT: @callee_with_vaend_alwaysinline
 
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_end(ptr)
 
 ; CHECK: attributes [[FN_ATTRS]] = { "foo"="bar" }
 attributes #0 = { "foo"="bar" }

diff  --git a/llvm/test/Transforms/Inline/inline-vla.ll b/llvm/test/Transforms/Inline/inline-vla.ll
index 750543ee89f6f..8e4bb3d140bca 100644
--- a/llvm/test/Transforms/Inline/inline-vla.ll
+++ b/llvm/test/Transforms/Inline/inline-vla.ll
@@ -9,27 +9,25 @@
 @.str1 = private unnamed_addr constant [3 x i8] c"ab\00", align 1
 
 ; Function Attrs: nounwind ssp uwtable
-define i32 @main(i32 %argc, i8** nocapture readnone %argv) #0 {
+define i32 @main(i32 %argc, ptr nocapture readnone %argv) #0 {
 entry:
   %data = alloca [2 x i8], align 1
-  %arraydecay = getelementptr inbounds [2 x i8], [2 x i8]* %data, i64 0, i64 0
-  call fastcc void @memcpy2(i8* %arraydecay, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str, i64 0, i64 0), i64 1)
-  call fastcc void @memcpy2(i8* %arraydecay, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str1, i64 0, i64 0), i64 2)
+  call fastcc void @memcpy2(ptr %data, ptr @.str, i64 1)
+  call fastcc void @memcpy2(ptr %data, ptr @.str1, i64 2)
   ret i32 0
 }
 
 ; Function Attrs: inlinehint nounwind ssp uwtable
-define internal fastcc void @memcpy2(i8* nocapture %dst, i8* nocapture readonly %src, i64 %size) #1 {
+define internal fastcc void @memcpy2(ptr nocapture %dst, ptr nocapture readonly %src, i64 %size) #1 {
 entry:
   %vla = alloca i64, i64 %size, align 16
-  %0 = bitcast i64* %vla to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %src, i64 %size, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %0, i64 %size, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %vla, ptr %src, i64 %size, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %vla, i64 %size, i1 false)
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #2
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #2
 
 attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { inlinehint nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff  --git a/llvm/test/Transforms/Inline/inline_call.ll b/llvm/test/Transforms/Inline/inline_call.ll
index fb000f0c805f2..366e91fc8f5eb 100644
--- a/llvm/test/Transforms/Inline/inline_call.ll
+++ b/llvm/test/Transforms/Inline/inline_call.ll
@@ -1,14 +1,13 @@
 ; Check the optimizer doesn't crash at inlining the function top and all of its callees are inlined.
 ; RUN: opt < %s -O3 -S | FileCheck %s
 
-define dso_local void (...)* @second(i8** %p) {
+define dso_local ptr @second(ptr %p) {
 entry:
-  %p.addr = alloca i8**, align 8
-  store i8** %p, i8*** %p.addr, align 8
-  %tmp = load i8**, i8*** %p.addr, align 8
-  %tmp1 = load i8*, i8** %tmp, align 8
-  %tmp2 = bitcast i8* %tmp1 to void (...)*
-  ret void (...)* %tmp2
+  %p.addr = alloca ptr, align 8
+  store ptr %p, ptr %p.addr, align 8
+  %tmp = load ptr, ptr %p.addr, align 8
+  %tmp1 = load ptr, ptr %tmp, align 8
+  ret ptr %tmp1
 }
 
 define dso_local void @top()  {
@@ -17,13 +16,13 @@ entry:
   ; CHECK-NOT: {{.*}} = {{.*}} call {{.*}} @third
   ; CHECK-NOT: {{.*}} = {{.*}} call {{.*}} @second
   ; CHECK-NOT: {{.*}} = {{.*}} call {{.*}} @wrapper
-  %q = alloca i8*, align 8
-  store i8* bitcast (void ()* @third to i8*), i8** %q, align 8
-  %tmp = call void (...)* @second(i8** %q)
+  %q = alloca ptr, align 8
+  store ptr @third, ptr %q, align 8
+  %tmp = call ptr @second(ptr %q)
   ; The call to 'wrapper' here is to ensure that its function attributes
   ; i.e., returning its parameter and having no side effect, will be decuded
   ; before the next round of inlining happens to 'top' to expose the bug.
-  %call =  call void (...)* @wrapper(void (...)* %tmp) 
+  %call =  call ptr @wrapper(ptr %tmp) 
   ; The indirect call here is to confuse the alias analyzer so that
   ; an incomplete graph will be built during the first round of inlining.
   ; This allows the current function to be processed before the actual 
@@ -34,38 +33,38 @@ entry:
   ret void
 }
 
-define dso_local void (...)* @gen() {
+define dso_local ptr @gen() {
 entry:
-  %call = call void (...)* (...) @ext()
-  ret void (...)* %call
+  %call = call ptr (...) @ext()
+  ret ptr %call
 }
 
-declare dso_local void (...)* @ext(...) 
+declare dso_local ptr @ext(...) 
 
-define dso_local void (...)* @wrapper(void (...)* %fn) {
+define dso_local ptr @wrapper(ptr %fn) {
 entry:
-  ret void (...)* %fn
+  ret ptr %fn
 }
 
-define dso_local void @run(void (...)* %fn) {
+define dso_local void @run(ptr %fn) {
 entry:
-  %fn.addr = alloca void (...)*, align 8
-  %f = alloca void (...)*, align 8
-  store void (...)* %fn, void (...)** %fn.addr, align 8
-  %tmp = load void (...)*, void (...)** %fn.addr, align 8
-  %call = call void (...)* @wrapper(void (...)* %tmp)
-  store void (...)* %call, void (...)** %f, align 8
-  %tmp1 = load void (...)*, void (...)** %f, align 8
+  %fn.addr = alloca ptr, align 8
+  %f = alloca ptr, align 8
+  store ptr %fn, ptr %fn.addr, align 8
+  %tmp = load ptr, ptr %fn.addr, align 8
+  %call = call ptr @wrapper(ptr %tmp)
+  store ptr %call, ptr %f, align 8
+  %tmp1 = load ptr, ptr %f, align 8
   call void (...) %tmp1()
   ret void
 }
 
 define dso_local void @third() {
 entry:
-  %f = alloca void (...)*, align 8
-  %call = call void (...)* @gen()
-  store void (...)* %call, void (...)** %f, align 8
-  %tmp = load void (...)*, void (...)** %f, align 8
-  call void @run(void (...)* %tmp)
+  %f = alloca ptr, align 8
+  %call = call ptr @gen()
+  store ptr %call, ptr %f, align 8
+  %tmp = load ptr, ptr %f, align 8
+  call void @run(ptr %tmp)
   ret void
 }
\ No newline at end of file

diff  --git a/llvm/test/Transforms/Inline/inline_cleanup.ll b/llvm/test/Transforms/Inline/inline_cleanup.ll
index 85d63e63d818a..d349e825b69a4 100644
--- a/llvm/test/Transforms/Inline/inline_cleanup.ll
+++ b/llvm/test/Transforms/Inline/inline_cleanup.ll
@@ -4,19 +4,19 @@
 ; RUN: opt < %s -passes=inline -S | FileCheck %s
 ; RUN: opt < %s -passes='cgscc(inline)' -S | FileCheck %s
 
- at A = weak global i32 0		; <i32*> [#uses=1]
- at B = weak global i32 0		; <i32*> [#uses=1]
- at C = weak global i32 0		; <i32*> [#uses=1]
+ at A = weak global i32 0		; <ptr> [#uses=1]
+ at B = weak global i32 0		; <ptr> [#uses=1]
+ at C = weak global i32 0		; <ptr> [#uses=1]
 
 define internal fastcc void @foo(i32 %X) {
 entry:
-	%ALL = alloca i32, align 4		; <i32*> [#uses=1]
+	%ALL = alloca i32, align 4		; <ptr> [#uses=1]
 	%tmp1 = and i32 %X, 1		; <i32> [#uses=1]
 	%tmp1.upgrd.1 = icmp eq i32 %tmp1, 0		; <i1> [#uses=1]
 	br i1 %tmp1.upgrd.1, label %cond_next, label %cond_true
 
 cond_true:		; preds = %entry
-	store i32 1, i32* @A
+	store i32 1, ptr @A
 	br label %cond_next
 
 cond_next:		; preds = %cond_true, %entry
@@ -25,7 +25,7 @@ cond_next:		; preds = %cond_true, %entry
 	br i1 %tmp4.upgrd.2, label %cond_next7, label %cond_true5
 
 cond_true5:		; preds = %cond_next
-	store i32 1, i32* @B
+	store i32 1, ptr @B
 	br label %cond_next7
 
 cond_next7:		; preds = %cond_true5, %cond_next
@@ -34,7 +34,7 @@ cond_next7:		; preds = %cond_true5, %cond_next
 	br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
 
 cond_true11:		; preds = %cond_next7
-	store i32 1, i32* @C
+	store i32 1, ptr @C
 	br label %cond_next13
 
 cond_next13:		; preds = %cond_true11, %cond_next7
@@ -43,14 +43,14 @@ cond_next13:		; preds = %cond_true11, %cond_next7
 	br i1 %tmp16.upgrd.4, label %UnifiedReturnBlock, label %cond_true17
 
 cond_true17:		; preds = %cond_next13
-	call void @ext( i32* %ALL )
+	call void @ext( ptr %ALL )
 	ret void
 
 UnifiedReturnBlock:		; preds = %cond_next13
 	ret void
 }
 
-declare void @ext(i32*)
+declare void @ext(ptr)
 
 define void @test() {
 ; CHECK-LABEL: @test(

diff  --git a/llvm/test/Transforms/Inline/inline_constprop.ll b/llvm/test/Transforms/Inline/inline_constprop.ll
index 0b1872db79f05..66aa1e7b477f8 100644
--- a/llvm/test/Transforms/Inline/inline_constprop.ll
+++ b/llvm/test/Transforms/Inline/inline_constprop.ll
@@ -36,7 +36,7 @@ define i32 @callee21(i32 %x, i32 %y) {
   ret i32 %result
 }
 
-declare i8* @getptr()
+declare ptr @getptr()
 
 define i32 @callee22(i32 %x) {
   %icmp = icmp ugt i32 %x, 42
@@ -164,9 +164,8 @@ entry:
 }
 
 define i64 @callee5(i64 %x, i64 %y) {
-  %inttoptr = inttoptr i64 %x to i8*
-  %bitcast = bitcast i8* %inttoptr to i32*
-  %ptrtoint = ptrtoint i32* %bitcast to i64
+  %inttoptr = inttoptr i64 %x to ptr
+  %ptrtoint = ptrtoint ptr %inttoptr to i64
   %trunc = trunc i64 %ptrtoint to i32
   %zext = zext i32 %trunc to i64
   %cmp = icmp eq i64 %zext, 42
@@ -236,9 +235,8 @@ define i32 @PR13412.main() {
 
 entry:
   %i1 = alloca i64
-  store i64 0, i64* %i1
-  %arraydecay = bitcast i64* %i1 to i32*
-  %call = call i1 @PR13412.first(i32* %arraydecay, i32* %arraydecay)
+  store i64 0, ptr %i1
+  %call = call i1 @PR13412.first(ptr %i1, ptr %i1)
   br i1 %call, label %cond.end, label %cond.false
 
 cond.false:
@@ -249,27 +247,27 @@ cond.end:
   ret i32 0
 }
 
-define internal i1 @PR13412.first(i32* %a, i32* %b) {
+define internal i1 @PR13412.first(ptr %a, ptr %b) {
 entry:
-  %call = call i32* @PR13412.second(i32* %a, i32* %b)
-  %cmp = icmp eq i32* %call, %b
+  %call = call ptr @PR13412.second(ptr %a, ptr %b)
+  %cmp = icmp eq ptr %call, %b
   ret i1 %cmp
 }
 
 declare void @PR13412.fail()
 
-define internal i32* @PR13412.second(i32* %a, i32* %b) {
+define internal ptr @PR13412.second(ptr %a, ptr %b) {
 entry:
-  %sub.ptr.lhs.cast = ptrtoint i32* %b to i64
-  %sub.ptr.rhs.cast = ptrtoint i32* %a to i64
+  %sub.ptr.lhs.cast = ptrtoint ptr %b to i64
+  %sub.ptr.rhs.cast = ptrtoint ptr %a to i64
   %sub.ptr.sub = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
   %sub.ptr.div = ashr exact i64 %sub.ptr.sub, 2
   %cmp = icmp ugt i64 %sub.ptr.div, 1
   br i1 %cmp, label %if.then, label %if.end3
 
 if.then:
-  %0 = load i32, i32* %a
-  %1 = load i32, i32* %b
+  %0 = load i32, ptr %a
+  %1 = load i32, ptr %b
   %cmp1 = icmp eq i32 %0, %1
   br i1 %cmp1, label %return, label %if.end3
 
@@ -277,8 +275,8 @@ if.end3:
   br label %return
 
 return:
-  %retval.0 = phi i32* [ %b, %if.end3 ], [ %a, %if.then ]
-  ret i32* %retval.0
+  %retval.0 = phi ptr [ %b, %if.end3 ], [ %a, %if.then ]
+  ret ptr %retval.0
 }
 
 declare i32 @PR28802.external(i32 returned %p1)

diff  --git a/llvm/test/Transforms/Inline/inline_dbg_declare.ll b/llvm/test/Transforms/Inline/inline_dbg_declare.ll
index 5a3a313f978e7..0952d9f01cef0 100644
--- a/llvm/test/Transforms/Inline/inline_dbg_declare.ll
+++ b/llvm/test/Transforms/Inline/inline_dbg_declare.ll
@@ -14,7 +14,7 @@
 ;;    return x;
 ;; }
 ;;
-;; void bar(float *dst)
+;; void bar(ptr dst)
 ;; {
 ;;    dst[0] = foo(dst[0]);
 ;; }
@@ -27,9 +27,9 @@ target triple = "i686-pc-windows-msvc"
 define float @foo(float %x) #0 !dbg !4 {
 entry:
   %x.addr = alloca float, align 4
-  store float %x, float* %x.addr, align 4
-  call void @llvm.dbg.declare(metadata float* %x.addr, metadata !16, metadata !17), !dbg !18
-  %0 = load float, float* %x.addr, align 4, !dbg !19
+  store float %x, ptr %x.addr, align 4
+  call void @llvm.dbg.declare(metadata ptr %x.addr, metadata !16, metadata !17), !dbg !18
+  %0 = load float, ptr %x.addr, align 4, !dbg !19
   ret float %0, !dbg !19
 }
 
@@ -39,26 +39,24 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
 ; CHECK: define void @bar
 
 ; Function Attrs: nounwind
-define void @bar(float* %dst) #0 !dbg !9 {
+define void @bar(ptr %dst) #0 !dbg !9 {
 entry:
 
 ; CHECK: [[x_addr_i:%.+]] = alloca float, align 4
-; CHECK: store float {{.*}}, float* [[x_addr_i]]
-; CHECK-NEXT: void @llvm.dbg.declare(metadata float* [[x_addr_i]], metadata [[m23:![0-9]+]], metadata !DIExpression()), !dbg [[m24:![0-9]+]]
+; CHECK: store float {{.*}}, ptr [[x_addr_i]]
+; CHECK-NEXT: void @llvm.dbg.declare(metadata ptr [[x_addr_i]], metadata [[m23:![0-9]+]], metadata !DIExpression()), !dbg [[m24:![0-9]+]]
 
-  %dst.addr = alloca float*, align 4
-  store float* %dst, float** %dst.addr, align 4
-  call void @llvm.dbg.declare(metadata float** %dst.addr, metadata !20, metadata !17), !dbg !21
-  %0 = load float*, float** %dst.addr, align 4, !dbg !22
-  %arrayidx = getelementptr inbounds float, float* %0, i32 0, !dbg !22
-  %1 = load float, float* %arrayidx, align 4, !dbg !22
+  %dst.addr = alloca ptr, align 4
+  store ptr %dst, ptr %dst.addr, align 4
+  call void @llvm.dbg.declare(metadata ptr %dst.addr, metadata !20, metadata !17), !dbg !21
+  %0 = load ptr, ptr %dst.addr, align 4, !dbg !22
+  %1 = load float, ptr %0, align 4, !dbg !22
   %call = call float @foo(float %1), !dbg !22
 
 ; CHECK-NOT: call float @foo
 
-  %2 = load float*, float** %dst.addr, align 4, !dbg !22
-  %arrayidx1 = getelementptr inbounds float, float* %2, i32 0, !dbg !22
-  store float %call, float* %arrayidx1, align 4, !dbg !22
+  %2 = load ptr, ptr %dst.addr, align 4, !dbg !22
+  store float %call, ptr %2, align 4, !dbg !22
   ret void, !dbg !23
 }
 

diff  --git a/llvm/test/Transforms/Inline/inline_inv_group.ll b/llvm/test/Transforms/Inline/inline_inv_group.ll
index 4346bb3e975a7..0be2c8eb05a98 100644
--- a/llvm/test/Transforms/Inline/inline_inv_group.ll
+++ b/llvm/test/Transforms/Inline/inline_inv_group.ll
@@ -3,17 +3,17 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-define i8* @callee() alwaysinline {
-; CHECK-LABEL: define i8* @callee()
-    %1 = call i8* @llvm.strip.invariant.group.p0i8(i8* null)
-    ret i8* %1
+define ptr @callee() alwaysinline {
+; CHECK-LABEL: define ptr @callee()
+    %1 = call ptr @llvm.strip.invariant.group.p0(ptr null)
+    ret ptr %1
 }
 
-define i8* @caller() {
-; CHECK-LABEL: define i8* @caller()
-; CHECK-NEXT: call i8* @llvm.strip.invariant.group.p0i8(i8* null)
-    %1 = call i8* @callee()
-    ret i8* %1
+define ptr @caller() {
+; CHECK-LABEL: define ptr @caller()
+; CHECK-NEXT: call ptr @llvm.strip.invariant.group.p0(ptr null)
+    %1 = call ptr @callee()
+    ret ptr %1
 }
 
-declare i8* @llvm.strip.invariant.group.p0i8(i8*)
+declare ptr @llvm.strip.invariant.group.p0(ptr)

diff  --git a/llvm/test/Transforms/Inline/inline_invoke.ll b/llvm/test/Transforms/Inline/inline_invoke.ll
index 8f95cf10cdfda..89c56447c07bd 100644
--- a/llvm/test/Transforms/Inline/inline_invoke.ll
+++ b/llvm/test/Transforms/Inline/inline_invoke.ll
@@ -9,59 +9,59 @@
 
 %struct.A = type { i8 }
 
- at _ZTIi = external constant i8*
+ at _ZTIi = external constant ptr
 
-declare void @_ZN1AC1Ev(%struct.A*)
+declare void @_ZN1AC1Ev(ptr)
 
-declare void @_ZN1AD1Ev(%struct.A*)
+declare void @_ZN1AD1Ev(ptr)
 
 declare void @use(i32) nounwind
 
 declare void @opaque()
 
-declare i32 @llvm.eh.typeid.for(i8*) nounwind
+declare i32 @llvm.eh.typeid.for(ptr) nounwind
 
 declare i32 @__gxx_personality_v0(...)
 
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 
 declare void @__cxa_end_catch()
 
 declare void @_ZSt9terminatev()
 
-define internal void @test0_in() alwaysinline uwtable ssp personality i32 (...)* @__gxx_personality_v0 {
+define internal void @test0_in() alwaysinline uwtable ssp personality ptr @__gxx_personality_v0 {
 entry:
   %a = alloca %struct.A, align 1
   %b = alloca %struct.A, align 1
-  call void @_ZN1AC1Ev(%struct.A* %a)
-  invoke void @_ZN1AC1Ev(%struct.A* %b)
+  call void @_ZN1AC1Ev(ptr %a)
+  invoke void @_ZN1AC1Ev(ptr %b)
           to label %invoke.cont unwind label %lpad
 
 invoke.cont:
-  invoke void @_ZN1AD1Ev(%struct.A* %b)
+  invoke void @_ZN1AD1Ev(ptr %b)
           to label %invoke.cont1 unwind label %lpad
 
 invoke.cont1:
-  call void @_ZN1AD1Ev(%struct.A* %a)
+  call void @_ZN1AD1Ev(ptr %a)
   ret void
 
 lpad:
-  %exn = landingpad {i8*, i32}
+  %exn = landingpad {ptr, i32}
             cleanup
-  invoke void @_ZN1AD1Ev(%struct.A* %a)
+  invoke void @_ZN1AD1Ev(ptr %a)
           to label %invoke.cont2 unwind label %terminate.lpad
 
 invoke.cont2:
-  resume { i8*, i32 } %exn
+  resume { ptr, i32 } %exn
 
 terminate.lpad:
-  %exn1 = landingpad {i8*, i32}
-            catch i8* null
+  %exn1 = landingpad {ptr, i32}
+            catch ptr null
   call void @_ZSt9terminatev() noreturn nounwind
   unreachable
 }
 
-define void @test0_out() uwtable ssp personality i32 (...)* @__gxx_personality_v0 {
+define void @test0_out() uwtable ssp personality ptr @__gxx_personality_v0 {
 entry:
   invoke void @test0_in()
           to label %ret unwind label %lpad
@@ -70,51 +70,51 @@ ret:
   ret void
 
 lpad:                                             ; preds = %entry
-  %exn = landingpad {i8*, i32}
-            catch i8* bitcast (i8** @_ZTIi to i8*)
-  %eh.exc = extractvalue { i8*, i32 } %exn, 0
-  %eh.selector = extractvalue { i8*, i32 } %exn, 1
-  %0 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) nounwind
+  %exn = landingpad {ptr, i32}
+            catch ptr @_ZTIi
+  %eh.exc = extractvalue { ptr, i32 } %exn, 0
+  %eh.selector = extractvalue { ptr, i32 } %exn, 1
+  %0 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) nounwind
   %1 = icmp eq i32 %eh.selector, %0
   br i1 %1, label %catch, label %eh.resume
 
 catch:
-  %ignored = call i8* @__cxa_begin_catch(i8* %eh.exc) nounwind
+  %ignored = call ptr @__cxa_begin_catch(ptr %eh.exc) nounwind
   call void @__cxa_end_catch() nounwind
   br label %ret
 
 eh.resume:
-  resume { i8*, i32 } %exn
+  resume { ptr, i32 } %exn
 }
 
 ; CHECK:    define void @test0_out()
 ; CHECK:      [[A:%.*]] = alloca %struct.A,
 ; CHECK:      [[B:%.*]] = alloca %struct.A,
-; CHECK:      invoke void @_ZN1AC1Ev(%struct.A* [[A]])
-; CHECK:      invoke void @_ZN1AC1Ev(%struct.A* [[B]])
-; CHECK:      invoke void @_ZN1AD1Ev(%struct.A* [[B]])
-; CHECK:      invoke void @_ZN1AD1Ev(%struct.A* [[A]])
-; CHECK:      landingpad { i8*, i32 }
+; CHECK:      invoke void @_ZN1AC1Ev(ptr [[A]])
+; CHECK:      invoke void @_ZN1AC1Ev(ptr [[B]])
+; CHECK:      invoke void @_ZN1AD1Ev(ptr [[B]])
+; CHECK:      invoke void @_ZN1AD1Ev(ptr [[A]])
+; CHECK:      landingpad { ptr, i32 }
 ; CHECK-NEXT:    cleanup
-; CHECK-NEXT:    catch i8* bitcast (i8** @_ZTIi to i8*)
-; CHECK-NEXT: invoke void @_ZN1AD1Ev(%struct.A* [[A]])
+; CHECK-NEXT:    catch ptr @_ZTIi
+; CHECK-NEXT: invoke void @_ZN1AD1Ev(ptr [[A]])
 ; CHECK-NEXT:   to label %[[LBL:[^\s]+]] unwind
 ; CHECK: [[LBL]]:
 ; CHECK-NEXT: br label %[[LPAD:[^\s]+]]
 ; CHECK:      ret void
-; CHECK:      landingpad { i8*, i32 }
-; CHECK-NEXT:    catch i8* bitcast (i8** @_ZTIi to i8*)
+; CHECK:      landingpad { ptr, i32 }
+; CHECK-NEXT:    catch ptr @_ZTIi
 ; CHECK-NEXT: br label %[[LPAD]]
 ; CHECK: [[LPAD]]:
-; CHECK-NEXT: phi { i8*, i32 } [
-; CHECK-NEXT: extractvalue { i8*, i32 }
-; CHECK-NEXT: extractvalue { i8*, i32 }
+; CHECK-NEXT: phi { ptr, i32 } [
+; CHECK-NEXT: extractvalue { ptr, i32 }
+; CHECK-NEXT: extractvalue { ptr, i32 }
 ; CHECK-NEXT: call i32 @llvm.eh.typeid.for(
 
 
 ;; Test 1 - Correctly handle phis in outer landing pads.
 
-define void @test1_out() uwtable ssp personality i32 (...)* @__gxx_personality_v0 {
+define void @test1_out() uwtable ssp personality ptr @__gxx_personality_v0 {
 entry:
   invoke void @test0_in()
           to label %cont unwind label %lpad
@@ -129,23 +129,23 @@ ret:
 lpad:
   %x = phi i32 [ 0, %entry ], [ 1, %cont ]
   %y = phi i32 [ 1, %entry ], [ 4, %cont ]
-  %exn = landingpad {i8*, i32}
-            catch i8* bitcast (i8** @_ZTIi to i8*)
-  %eh.exc = extractvalue { i8*, i32 } %exn, 0
-  %eh.selector = extractvalue { i8*, i32 } %exn, 1
-  %0 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) nounwind
+  %exn = landingpad {ptr, i32}
+            catch ptr @_ZTIi
+  %eh.exc = extractvalue { ptr, i32 } %exn, 0
+  %eh.selector = extractvalue { ptr, i32 } %exn, 1
+  %0 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) nounwind
   %1 = icmp eq i32 %eh.selector, %0
   br i1 %1, label %catch, label %eh.resume
 
 catch:
-  %ignored = call i8* @__cxa_begin_catch(i8* %eh.exc) nounwind
+  %ignored = call ptr @__cxa_begin_catch(ptr %eh.exc) nounwind
   call void @use(i32 %x)
   call void @use(i32 %y)
   call void @__cxa_end_catch() nounwind
   br label %ret
 
 eh.resume:
-  resume { i8*, i32 } %exn
+  resume { ptr, i32 } %exn
 }
 
 ; CHECK:    define void @test1_out()
@@ -153,40 +153,40 @@ eh.resume:
 ; CHECK:      [[B2:%.*]] = alloca %struct.A,
 ; CHECK:      [[A1:%.*]] = alloca %struct.A,
 ; CHECK:      [[B1:%.*]] = alloca %struct.A,
-; CHECK:      invoke void @_ZN1AC1Ev(%struct.A* [[A1]])
+; CHECK:      invoke void @_ZN1AC1Ev(ptr [[A1]])
 ; CHECK-NEXT:   unwind label %[[LPAD:[^\s]+]]
-; CHECK:      invoke void @_ZN1AC1Ev(%struct.A* [[B1]])
+; CHECK:      invoke void @_ZN1AC1Ev(ptr [[B1]])
 ; CHECK-NEXT:   unwind label %[[LPAD1:[^\s]+]]
-; CHECK:      invoke void @_ZN1AD1Ev(%struct.A* [[B1]])
+; CHECK:      invoke void @_ZN1AD1Ev(ptr [[B1]])
 ; CHECK-NEXT:   unwind label %[[LPAD1]]
-; CHECK:      invoke void @_ZN1AD1Ev(%struct.A* [[A1]])
+; CHECK:      invoke void @_ZN1AD1Ev(ptr [[A1]])
 ; CHECK-NEXT:   unwind label %[[LPAD]]
 
 ; Inner landing pad from first inlining.
 ; CHECK:    [[LPAD1]]:
-; CHECK-NEXT: [[LPADVAL1:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT: [[LPADVAL1:%.*]] = landingpad { ptr, i32 }
 ; CHECK-NEXT:    cleanup
-; CHECK-NEXT:    catch i8* bitcast (i8** @_ZTIi to i8*)
-; CHECK-NEXT: invoke void @_ZN1AD1Ev(%struct.A* [[A1]])
+; CHECK-NEXT:    catch ptr @_ZTIi
+; CHECK-NEXT: invoke void @_ZN1AD1Ev(ptr [[A1]])
 ; CHECK-NEXT:   to label %[[RESUME1:[^\s]+]] unwind
 ; CHECK: [[RESUME1]]:
 ; CHECK-NEXT: br label %[[LPAD_JOIN1:[^\s]+]]
 
-; CHECK:      invoke void @_ZN1AC1Ev(%struct.A* [[A2]])
+; CHECK:      invoke void @_ZN1AC1Ev(ptr [[A2]])
 ; CHECK-NEXT:   unwind label %[[LPAD]]
-; CHECK:      invoke void @_ZN1AC1Ev(%struct.A* [[B2]])
+; CHECK:      invoke void @_ZN1AC1Ev(ptr [[B2]])
 ; CHECK-NEXT:   unwind label %[[LPAD2:[^\s]+]]
-; CHECK:      invoke void @_ZN1AD1Ev(%struct.A* [[B2]])
+; CHECK:      invoke void @_ZN1AD1Ev(ptr [[B2]])
 ; CHECK-NEXT:   unwind label %[[LPAD2]]
-; CHECK:      invoke void @_ZN1AD1Ev(%struct.A* [[A2]])
+; CHECK:      invoke void @_ZN1AD1Ev(ptr [[A2]])
 ; CHECK-NEXT:   unwind label %[[LPAD]]
 
 ; Inner landing pad from second inlining.
 ; CHECK:    [[LPAD2]]:
-; CHECK-NEXT: [[LPADVAL2:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT: [[LPADVAL2:%.*]] = landingpad { ptr, i32 }
 ; CHECK-NEXT:   cleanup
-; CHECK-NEXT:   catch i8* bitcast (i8** @_ZTIi to i8*)
-; CHECK-NEXT: invoke void @_ZN1AD1Ev(%struct.A* [[A2]])
+; CHECK-NEXT:   catch ptr @_ZTIi
+; CHECK-NEXT: invoke void @_ZN1AD1Ev(ptr [[A2]])
 ; CHECK-NEXT:   to label %[[RESUME2:[^\s]+]] unwind
 ; CHECK: [[RESUME2]]:
 ; CHECK-NEXT: br label %[[LPAD_JOIN2:[^\s]+]]
@@ -196,33 +196,33 @@ eh.resume:
 ; CHECK:    [[LPAD]]:
 ; CHECK-NEXT: [[X:%.*]] = phi i32 [ 0, %entry ], [ 0, {{%.*}} ], [ 1, %cont ], [ 1, {{%.*}} ]
 ; CHECK-NEXT: [[Y:%.*]] = phi i32 [ 1, %entry ], [ 1, {{%.*}} ], [ 4, %cont ], [ 4, {{%.*}} ]
-; CHECK-NEXT: [[LPADVAL:%.*]] = landingpad { i8*, i32 }
-; CHECK-NEXT:   catch i8* bitcast (i8** @_ZTIi to i8*)
+; CHECK-NEXT: [[LPADVAL:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT:   catch ptr @_ZTIi
 ; CHECK-NEXT: br label %[[LPAD_JOIN2]]
 
 ; CHECK: [[LPAD_JOIN2]]:
 ; CHECK-NEXT: [[XJ2:%.*]] = phi i32 [ [[X]], %[[LPAD]] ], [ 1, %[[RESUME2]] ]
 ; CHECK-NEXT: [[YJ2:%.*]] = phi i32 [ [[Y]], %[[LPAD]] ], [ 4, %[[RESUME2]] ]
-; CHECK-NEXT: [[EXNJ2:%.*]] = phi { i8*, i32 } [ [[LPADVAL]], %[[LPAD]] ], [ [[LPADVAL2]], %[[RESUME2]] ]
+; CHECK-NEXT: [[EXNJ2:%.*]] = phi { ptr, i32 } [ [[LPADVAL]], %[[LPAD]] ], [ [[LPADVAL2]], %[[RESUME2]] ]
 ; CHECK-NEXT: br label %[[LPAD_JOIN1]]
 
 ; CHECK: [[LPAD_JOIN1]]:
 ; CHECK-NEXT: [[XJ1:%.*]] = phi i32 [ [[XJ2]], %[[LPAD_JOIN2]] ], [ 0, %[[RESUME1]] ]
 ; CHECK-NEXT: [[YJ1:%.*]] = phi i32 [ [[YJ2]], %[[LPAD_JOIN2]] ], [ 1, %[[RESUME1]] ]
-; CHECK-NEXT: [[EXNJ1:%.*]] = phi { i8*, i32 } [ [[EXNJ2]], %[[LPAD_JOIN2]] ], [ [[LPADVAL1]], %[[RESUME1]] ]
-; CHECK-NEXT: extractvalue { i8*, i32 } [[EXNJ1]], 0
-; CHECK-NEXT: [[SELJ1:%.*]] = extractvalue { i8*, i32 } [[EXNJ1]], 1
+; CHECK-NEXT: [[EXNJ1:%.*]] = phi { ptr, i32 } [ [[EXNJ2]], %[[LPAD_JOIN2]] ], [ [[LPADVAL1]], %[[RESUME1]] ]
+; CHECK-NEXT: extractvalue { ptr, i32 } [[EXNJ1]], 0
+; CHECK-NEXT: [[SELJ1:%.*]] = extractvalue { ptr, i32 } [[EXNJ1]], 1
 ; CHECK-NEXT: [[T:%.*]] = call i32 @llvm.eh.typeid.for(
 ; CHECK-NEXT: icmp eq i32 [[SELJ1]], [[T]]
 
 ; CHECK:      call void @use(i32 [[XJ1]])
 ; CHECK:      call void @use(i32 [[YJ1]])
 
-; CHECK:      resume { i8*, i32 }
+; CHECK:      resume { ptr, i32 }
 
 
 ;; Test 2 - Don't make invalid IR for inlines into landing pads without eh.exception calls
-define void @test2_out() uwtable ssp personality i32 (...)* @__gxx_personality_v0 {
+define void @test2_out() uwtable ssp personality ptr @__gxx_personality_v0 {
 entry:
   invoke void @test0_in()
           to label %ret unwind label %lpad
@@ -231,7 +231,7 @@ ret:
   ret void
 
 lpad:
-  %exn = landingpad {i8*, i32}
+  %exn = landingpad {ptr, i32}
             cleanup
   call void @_ZSt9terminatev()
   unreachable
@@ -240,18 +240,18 @@ lpad:
 ; CHECK: define void @test2_out()
 ; CHECK:      [[A:%.*]] = alloca %struct.A,
 ; CHECK:      [[B:%.*]] = alloca %struct.A,
-; CHECK:      invoke void @_ZN1AC1Ev(%struct.A* [[A]])
+; CHECK:      invoke void @_ZN1AC1Ev(ptr [[A]])
 ; CHECK-NEXT:   unwind label %[[LPAD:[^\s]+]]
-; CHECK:      invoke void @_ZN1AC1Ev(%struct.A* [[B]])
+; CHECK:      invoke void @_ZN1AC1Ev(ptr [[B]])
 ; CHECK-NEXT:   unwind label %[[LPAD2:[^\s]+]]
-; CHECK:      invoke void @_ZN1AD1Ev(%struct.A* [[B]])
+; CHECK:      invoke void @_ZN1AD1Ev(ptr [[B]])
 ; CHECK-NEXT:   unwind label %[[LPAD2]]
-; CHECK:      invoke void @_ZN1AD1Ev(%struct.A* [[A]])
+; CHECK:      invoke void @_ZN1AD1Ev(ptr [[A]])
 ; CHECK-NEXT:   unwind label %[[LPAD]]
 
 
 ;; Test 3 - Deal correctly with split unwind edges.
-define void @test3_out() uwtable ssp personality i32 (...)* @__gxx_personality_v0 {
+define void @test3_out() uwtable ssp personality ptr @__gxx_personality_v0 {
 entry:
   invoke void @test0_in()
           to label %ret unwind label %lpad
@@ -260,8 +260,8 @@ ret:
   ret void
 
 lpad:
-  %exn = landingpad {i8*, i32}
-            catch i8* bitcast (i8** @_ZTIi to i8*)
+  %exn = landingpad {ptr, i32}
+            catch ptr @_ZTIi
   br label %lpad.cont
 
 lpad.cont:
@@ -270,22 +270,22 @@ lpad.cont:
 }
 
 ; CHECK: define void @test3_out()
-; CHECK:      landingpad { i8*, i32 }
+; CHECK:      landingpad { ptr, i32 }
 ; CHECK-NEXT:    cleanup
-; CHECK-NEXT:    catch i8* bitcast (i8** @_ZTIi to i8*)
+; CHECK-NEXT:    catch ptr @_ZTIi
 ; CHECK-NEXT: invoke void @_ZN1AD1Ev(
 ; CHECK-NEXT:   to label %[[L:[^\s]+]] unwind
 ; CHECK:    [[L]]:
 ; CHECK-NEXT: br label %[[JOIN:[^\s]+]]
 ; CHECK:    [[JOIN]]:
-; CHECK-NEXT: phi { i8*, i32 }
+; CHECK-NEXT: phi { ptr, i32 }
 ; CHECK-NEXT: br label %lpad.cont
 ; CHECK:    lpad.cont:
 ; CHECK-NEXT: call void @_ZSt9terminatev()
 
 
 ;; Test 4 - Split unwind edges with a dominance problem
-define void @test4_out() uwtable ssp personality i32 (...)* @__gxx_personality_v0 {
+define void @test4_out() uwtable ssp personality ptr @__gxx_personality_v0 {
 entry:
   invoke void @test0_in()
           to label %cont unwind label %lpad.crit
@@ -298,14 +298,14 @@ ret:
   ret void
 
 lpad.crit:
-  %exn = landingpad {i8*, i32}
-            catch i8* bitcast (i8** @_ZTIi to i8*)
+  %exn = landingpad {ptr, i32}
+            catch ptr @_ZTIi
   call void @opaque() nounwind
   br label %terminate
 
 lpad:
-  %exn2 = landingpad {i8*, i32}
-            catch i8* bitcast (i8** @_ZTIi to i8*)
+  %exn2 = landingpad {ptr, i32}
+            catch ptr @_ZTIi
   br label %terminate
 
 terminate:
@@ -316,9 +316,9 @@ terminate:
 }
 
 ; CHECK: define void @test4_out()
-; CHECK:      landingpad { i8*, i32 }
+; CHECK:      landingpad { ptr, i32 }
 ; CHECK-NEXT:    cleanup
-; CHECK-NEXT:    catch i8* bitcast (i8** @_ZTIi to i8*)
+; CHECK-NEXT:    catch ptr @_ZTIi
 ; CHECK-NEXT: invoke void @_ZN1AD1Ev(
 ; CHECK-NEXT:   to label %[[L:[^\s]+]] unwind
 ; CHECK:    [[L]]:
@@ -326,16 +326,16 @@ terminate:
 ; CHECK:      invoke void @opaque()
 ; CHECK-NEXT:                  unwind label %lpad
 ; CHECK:    lpad.crit:
-; CHECK-NEXT: landingpad { i8*, i32 }
-; CHECK-NEXT:   catch i8* bitcast (i8** @_ZTIi to i8*)
+; CHECK-NEXT: landingpad { ptr, i32 }
+; CHECK-NEXT:   catch ptr @_ZTIi
 ; CHECK-NEXT: br label %[[JOIN]]
 ; CHECK:    [[JOIN]]:
-; CHECK-NEXT: phi { i8*, i32 }
+; CHECK-NEXT: phi { ptr, i32 }
 ; CHECK-NEXT: call void @opaque() [[NUW:#[0-9]+]]
 ; CHECK-NEXT: br label %[[FIX:[^\s]+]]
 ; CHECK:    lpad:
-; CHECK-NEXT: landingpad { i8*, i32 }
-; CHECK-NEXT:   catch i8* bitcast (i8** @_ZTIi to i8*)
+; CHECK-NEXT: landingpad { ptr, i32 }
+; CHECK-NEXT:   catch ptr @_ZTIi
 ; CHECK-NEXT: br label %[[FIX]]
 ; CHECK:    [[FIX]]:
 ; CHECK-NEXT: [[T1:%.*]] = phi i32 [ 0, %[[JOIN]] ], [ 1, %lpad ]

diff  --git a/llvm/test/Transforms/Inline/inline_minisize.ll b/llvm/test/Transforms/Inline/inline_minisize.ll
index c19b94fdc4090..fc5f60873c0ed 100644
--- a/llvm/test/Transforms/Inline/inline_minisize.ll
+++ b/llvm/test/Transforms/Inline/inline_minisize.ll
@@ -1,200 +1,200 @@
 ; RUN: opt -passes=inline -inline-threshold=225 -inlinehint-threshold=360 -S < %s | FileCheck %s
 
-@data = common global i32* null, align 8
+@data = common global ptr null, align 8
 
 define i32 @fct1(i32 %a) nounwind uwtable ssp {
 entry:
   %a.addr = alloca i32, align 4
   %res = alloca i32, align 4
   %i = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  %tmp = load i32, i32* %a.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  %tmp = load i32, ptr %a.addr, align 4
   %idxprom = sext i32 %tmp to i64
-  %tmp1 = load i32*, i32** @data, align 8
-  %arrayidx = getelementptr inbounds i32, i32* %tmp1, i64 %idxprom
-  %tmp2 = load i32, i32* %arrayidx, align 4
-  %tmp3 = load i32, i32* %a.addr, align 4
+  %tmp1 = load ptr, ptr @data, align 8
+  %arrayidx = getelementptr inbounds i32, ptr %tmp1, i64 %idxprom
+  %tmp2 = load i32, ptr %arrayidx, align 4
+  %tmp3 = load i32, ptr %a.addr, align 4
   %add = add nsw i32 %tmp3, 1
   %idxprom1 = sext i32 %add to i64
-  %tmp4 = load i32*, i32** @data, align 8
-  %arrayidx2 = getelementptr inbounds i32, i32* %tmp4, i64 %idxprom1
-  %tmp5 = load i32, i32* %arrayidx2, align 4
+  %tmp4 = load ptr, ptr @data, align 8
+  %arrayidx2 = getelementptr inbounds i32, ptr %tmp4, i64 %idxprom1
+  %tmp5 = load i32, ptr %arrayidx2, align 4
   %mul = mul nsw i32 %tmp2, %tmp5
-  store i32 %mul, i32* %res, align 4
-  store i32 0, i32* %i, align 4
-  store i32 0, i32* %i, align 4
+  store i32 %mul, ptr %res, align 4
+  store i32 0, ptr %i, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %tmp6 = load i32, i32* %i, align 4
-  %tmp7 = load i32, i32* %res, align 4
+  %tmp6 = load i32, ptr %i, align 4
+  %tmp7 = load i32, ptr %res, align 4
   %cmp = icmp slt i32 %tmp6, %tmp7
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %tmp8 = load i32, i32* %i, align 4
+  %tmp8 = load i32, ptr %i, align 4
   %idxprom3 = sext i32 %tmp8 to i64
-  %tmp9 = load i32*, i32** @data, align 8
-  %arrayidx4 = getelementptr inbounds i32, i32* %tmp9, i64 %idxprom3
-  call void @fct0(i32* %arrayidx4)
+  %tmp9 = load ptr, ptr @data, align 8
+  %arrayidx4 = getelementptr inbounds i32, ptr %tmp9, i64 %idxprom3
+  call void @fct0(ptr %arrayidx4)
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body
-  %tmp10 = load i32, i32* %i, align 4
+  %tmp10 = load i32, ptr %i, align 4
   %inc = add nsw i32 %tmp10, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond
 
 for.end:                                          ; preds = %for.cond
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond5
 
 for.cond5:                                        ; preds = %for.inc10, %for.end
-  %tmp11 = load i32, i32* %i, align 4
-  %tmp12 = load i32, i32* %res, align 4
+  %tmp11 = load i32, ptr %i, align 4
+  %tmp12 = load i32, ptr %res, align 4
   %cmp6 = icmp slt i32 %tmp11, %tmp12
   br i1 %cmp6, label %for.body7, label %for.end12
 
 for.body7:                                        ; preds = %for.cond5
-  %tmp13 = load i32, i32* %i, align 4
+  %tmp13 = load i32, ptr %i, align 4
   %idxprom8 = sext i32 %tmp13 to i64
-  %tmp14 = load i32*, i32** @data, align 8
-  %arrayidx9 = getelementptr inbounds i32, i32* %tmp14, i64 %idxprom8
-  call void @fct0(i32* %arrayidx9)
+  %tmp14 = load ptr, ptr @data, align 8
+  %arrayidx9 = getelementptr inbounds i32, ptr %tmp14, i64 %idxprom8
+  call void @fct0(ptr %arrayidx9)
   br label %for.inc10
 
 for.inc10:                                        ; preds = %for.body7
-  %tmp15 = load i32, i32* %i, align 4
+  %tmp15 = load i32, ptr %i, align 4
   %inc11 = add nsw i32 %tmp15, 1
-  store i32 %inc11, i32* %i, align 4
+  store i32 %inc11, ptr %i, align 4
   br label %for.cond5
 
 for.end12:                                        ; preds = %for.cond5
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond13
 
 for.cond13:                                       ; preds = %for.inc18, %for.end12
-  %tmp16 = load i32, i32* %i, align 4
-  %tmp17 = load i32, i32* %res, align 4
+  %tmp16 = load i32, ptr %i, align 4
+  %tmp17 = load i32, ptr %res, align 4
   %cmp14 = icmp slt i32 %tmp16, %tmp17
   br i1 %cmp14, label %for.body15, label %for.end20
 
 for.body15:                                       ; preds = %for.cond13
-  %tmp18 = load i32, i32* %i, align 4
+  %tmp18 = load i32, ptr %i, align 4
   %idxprom16 = sext i32 %tmp18 to i64
-  %tmp19 = load i32*, i32** @data, align 8
-  %arrayidx17 = getelementptr inbounds i32, i32* %tmp19, i64 %idxprom16
-  call void @fct0(i32* %arrayidx17)
+  %tmp19 = load ptr, ptr @data, align 8
+  %arrayidx17 = getelementptr inbounds i32, ptr %tmp19, i64 %idxprom16
+  call void @fct0(ptr %arrayidx17)
   br label %for.inc18
 
 for.inc18:                                        ; preds = %for.body15
-  %tmp20 = load i32, i32* %i, align 4
+  %tmp20 = load i32, ptr %i, align 4
   %inc19 = add nsw i32 %tmp20, 1
-  store i32 %inc19, i32* %i, align 4
+  store i32 %inc19, ptr %i, align 4
   br label %for.cond13
 
 for.end20:                                        ; preds = %for.cond13
-  %tmp21 = load i32, i32* %res, align 4
+  %tmp21 = load i32, ptr %res, align 4
   ret i32 %tmp21
 }
 
-declare void @fct0(i32*)
+declare void @fct0(ptr)
 
 define i32 @fct2(i32 %a) nounwind uwtable inlinehint ssp {
 entry:
   %a.addr = alloca i32, align 4
   %res = alloca i32, align 4
   %i = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  %tmp = load i32, i32* %a.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  %tmp = load i32, ptr %a.addr, align 4
   %shl = shl i32 %tmp, 1
   %idxprom = sext i32 %shl to i64
-  %tmp1 = load i32*, i32** @data, align 8
-  %arrayidx = getelementptr inbounds i32, i32* %tmp1, i64 %idxprom
-  %tmp2 = load i32, i32* %arrayidx, align 4
-  %tmp3 = load i32, i32* %a.addr, align 4
+  %tmp1 = load ptr, ptr @data, align 8
+  %arrayidx = getelementptr inbounds i32, ptr %tmp1, i64 %idxprom
+  %tmp2 = load i32, ptr %arrayidx, align 4
+  %tmp3 = load i32, ptr %a.addr, align 4
   %shl1 = shl i32 %tmp3, 1
   %add = add nsw i32 %shl1, 13
   %idxprom2 = sext i32 %add to i64
-  %tmp4 = load i32*, i32** @data, align 8
-  %arrayidx3 = getelementptr inbounds i32, i32* %tmp4, i64 %idxprom2
-  %tmp5 = load i32, i32* %arrayidx3, align 4
+  %tmp4 = load ptr, ptr @data, align 8
+  %arrayidx3 = getelementptr inbounds i32, ptr %tmp4, i64 %idxprom2
+  %tmp5 = load i32, ptr %arrayidx3, align 4
   %mul = mul nsw i32 %tmp2, %tmp5
-  store i32 %mul, i32* %res, align 4
-  store i32 0, i32* %i, align 4
-  store i32 0, i32* %i, align 4
+  store i32 %mul, ptr %res, align 4
+  store i32 0, ptr %i, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %tmp6 = load i32, i32* %i, align 4
-  %tmp7 = load i32, i32* %res, align 4
+  %tmp6 = load i32, ptr %i, align 4
+  %tmp7 = load i32, ptr %res, align 4
   %cmp = icmp slt i32 %tmp6, %tmp7
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %tmp8 = load i32, i32* %i, align 4
+  %tmp8 = load i32, ptr %i, align 4
   %idxprom4 = sext i32 %tmp8 to i64
-  %tmp9 = load i32*, i32** @data, align 8
-  %arrayidx5 = getelementptr inbounds i32, i32* %tmp9, i64 %idxprom4
-  call void @fct0(i32* %arrayidx5)
+  %tmp9 = load ptr, ptr @data, align 8
+  %arrayidx5 = getelementptr inbounds i32, ptr %tmp9, i64 %idxprom4
+  call void @fct0(ptr %arrayidx5)
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body
-  %tmp10 = load i32, i32* %i, align 4
+  %tmp10 = load i32, ptr %i, align 4
   %inc = add nsw i32 %tmp10, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond
 
 for.end:                                          ; preds = %for.cond
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond6
 
 for.cond6:                                        ; preds = %for.inc11, %for.end
-  %tmp11 = load i32, i32* %i, align 4
-  %tmp12 = load i32, i32* %res, align 4
+  %tmp11 = load i32, ptr %i, align 4
+  %tmp12 = load i32, ptr %res, align 4
   %cmp7 = icmp slt i32 %tmp11, %tmp12
   br i1 %cmp7, label %for.body8, label %for.end13
 
 for.body8:                                        ; preds = %for.cond6
-  %tmp13 = load i32, i32* %i, align 4
+  %tmp13 = load i32, ptr %i, align 4
   %idxprom9 = sext i32 %tmp13 to i64
-  %tmp14 = load i32*, i32** @data, align 8
-  %arrayidx10 = getelementptr inbounds i32, i32* %tmp14, i64 %idxprom9
-  call void @fct0(i32* %arrayidx10)
+  %tmp14 = load ptr, ptr @data, align 8
+  %arrayidx10 = getelementptr inbounds i32, ptr %tmp14, i64 %idxprom9
+  call void @fct0(ptr %arrayidx10)
   br label %for.inc11
 
 for.inc11:                                        ; preds = %for.body8
-  %tmp15 = load i32, i32* %i, align 4
+  %tmp15 = load i32, ptr %i, align 4
   %inc12 = add nsw i32 %tmp15, 1
-  store i32 %inc12, i32* %i, align 4
+  store i32 %inc12, ptr %i, align 4
   br label %for.cond6
 
 for.end13:                                        ; preds = %for.cond6
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond14
 
 for.cond14:                                       ; preds = %for.inc19, %for.end13
-  %tmp16 = load i32, i32* %i, align 4
-  %tmp17 = load i32, i32* %res, align 4
+  %tmp16 = load i32, ptr %i, align 4
+  %tmp17 = load i32, ptr %res, align 4
   %cmp15 = icmp slt i32 %tmp16, %tmp17
   br i1 %cmp15, label %for.body16, label %for.end21
 
 for.body16:                                       ; preds = %for.cond14
-  %tmp18 = load i32, i32* %i, align 4
+  %tmp18 = load i32, ptr %i, align 4
   %idxprom17 = sext i32 %tmp18 to i64
-  %tmp19 = load i32*, i32** @data, align 8
-  %arrayidx18 = getelementptr inbounds i32, i32* %tmp19, i64 %idxprom17
-  call void @fct0(i32* %arrayidx18)
+  %tmp19 = load ptr, ptr @data, align 8
+  %arrayidx18 = getelementptr inbounds i32, ptr %tmp19, i64 %idxprom17
+  call void @fct0(ptr %arrayidx18)
   br label %for.inc19
 
 for.inc19:                                        ; preds = %for.body16
-  %tmp20 = load i32, i32* %i, align 4
+  %tmp20 = load i32, ptr %i, align 4
   %inc20 = add nsw i32 %tmp20, 1
-  store i32 %inc20, i32* %i, align 4
+  store i32 %inc20, ptr %i, align 4
   br label %for.cond14
 
 for.end21:                                        ; preds = %for.cond14
-  %tmp21 = load i32, i32* %res, align 4
+  %tmp21 = load i32, ptr %res, align 4
   ret i32 %tmp21
 }
 
@@ -205,10 +205,10 @@ entry:
   ; The inline keyword gives a sufficient benefits to inline fct2
   ;CHECK-NOT: call i32 @fct2
   %c.addr = alloca i32, align 4
-  store i32 %c, i32* %c.addr, align 4
-  %tmp = load i32, i32* %c.addr, align 4
+  store i32 %c, ptr %c.addr, align 4
+  %tmp = load i32, ptr %c.addr, align 4
   %call = call i32 @fct1(i32 %tmp)
-  %tmp1 = load i32, i32* %c.addr, align 4
+  %tmp1 = load i32, ptr %c.addr, align 4
   %call1 = call i32 @fct2(i32 %tmp1)
   %add = add nsw i32 %call, %call1
   ret i32 %add
@@ -222,10 +222,10 @@ entry:
   ; is the same as fct1, thus no inlining for fct2
   ;CHECK: call i32 @fct2
   %c.addr = alloca i32, align 4
-  store i32 %c, i32* %c.addr, align 4
-  %tmp = load i32, i32* %c.addr, align 4
+  store i32 %c, ptr %c.addr, align 4
+  %tmp = load i32, ptr %c.addr, align 4
   %call = call i32 @fct1(i32 %tmp)
-  %tmp1 = load i32, i32* %c.addr, align 4
+  %tmp1 = load i32, ptr %c.addr, align 4
   %call1 = call i32 @fct2(i32 %tmp1)
   %add = add nsw i32 %call, %call1
   ret i32 %add

diff  --git a/llvm/test/Transforms/Inline/inline_returns_twice.ll b/llvm/test/Transforms/Inline/inline_returns_twice.ll
index 9a57af0966483..cacedc0e0bcf0 100644
--- a/llvm/test/Transforms/Inline/inline_returns_twice.ll
+++ b/llvm/test/Transforms/Inline/inline_returns_twice.ll
@@ -38,7 +38,7 @@ entry:
   ret i32 %add
 }
 
-define i32 @inner3() personality i8* null {
+define i32 @inner3() personality ptr null {
 entry:
   %invoke = invoke i32 @a() returns_twice
       to label %cont unwind label %lpad
@@ -61,7 +61,7 @@ entry:
   ret i32 %add
 }
 
-define i32 @inner4() returns_twice personality i8* null {
+define i32 @inner4() returns_twice personality ptr null {
 entry:
   %invoke = invoke i32 @a() returns_twice
       to label %cont unwind label %lpad

diff  --git a/llvm/test/Transforms/Inline/inline_ssp.ll b/llvm/test/Transforms/Inline/inline_ssp.ll
index 5ae2420abd201..ab6f8d5158007 100644
--- a/llvm/test/Transforms/Inline/inline_ssp.ll
+++ b/llvm/test/Transforms/Inline/inline_ssp.ll
@@ -16,31 +16,31 @@
 
 define internal void @fun_sspreq() sspreq {
 entry:
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str3, i32 0, i32 0))
+  %call = call i32 (ptr, ...) @printf(ptr @.str3)
   ret void
 }
 
 define internal void @fun_sspreq_alwaysinline() sspreq alwaysinline {
 entry:
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str3, i32 0, i32 0))
+  %call = call i32 (ptr, ...) @printf(ptr @.str3)
   ret void
 }
 
 define internal void @fun_sspstrong() sspstrong {
 entry:
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str2, i32 0, i32 0))
+  %call = call i32 (ptr, ...) @printf(ptr @.str2)
   ret void
 }
 
 define internal void @fun_ssp() ssp {
 entry:
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str1, i32 0, i32 0))
+  %call = call i32 (ptr, ...) @printf(ptr @.str1)
   ret void
 }
 
 define internal void @fun_nossp() {
 entry:
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0))
+  %call = call i32 (ptr, ...) @printf(ptr @.str)
   ret void
 }
 
@@ -168,7 +168,7 @@ entry:
   ret void
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
 
 ; CHECK: attributes #[[SSPREQ]] = { sspreq }
 ; CHECK: attributes #[[SSPSTRONG]] = { sspstrong }

diff  --git a/llvm/test/Transforms/Inline/inline_unreachable.ll b/llvm/test/Transforms/Inline/inline_unreachable.ll
index 9699ff68e4fb6..e5c8bfd37a334 100644
--- a/llvm/test/Transforms/Inline/inline_unreachable.ll
+++ b/llvm/test/Transforms/Inline/inline_unreachable.ll
@@ -2,7 +2,7 @@
 ; RUN: opt < %s -passes='cgscc(inline)' -S | FileCheck %s
 
 @a = global i32 4
-@_ZTIi = external global i8*
+@_ZTIi = external global ptr
 
 ; CHECK-LABEL: callSimpleFunction
 ; CHECK: call i32 @simpleFunction
@@ -13,7 +13,7 @@ entry:
 
 if.then:
   %s = call i32 @simpleFunction(i32 %idx)
-  store i32 %s, i32* @a
+  store i32 %s, ptr @a
   unreachable
 
 if.end:
@@ -29,7 +29,7 @@ entry:
 
 if.then:
   %s = call i32 @smallFunction(i32 %idx)
-  store i32 %s, i32* @a
+  store i32 %s, ptr @a
   unreachable
 
 if.end:
@@ -38,22 +38,22 @@ if.end:
 
 ; CHECK-LABEL: throwSimpleException
 ; CHECK: invoke i32 @simpleFunction
-define i32 @throwSimpleException(i32 %idx, i32 %limit) #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @throwSimpleException(i32 %idx, i32 %limit) #0 personality ptr @__gxx_personality_v0 {
 entry:
   %cmp = icmp sge i32 %idx, %limit
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %exception = call i8* @__cxa_allocate_exception(i64 1) #0
+  %exception = call ptr @__cxa_allocate_exception(i64 1) #0
   invoke i32 @simpleFunction(i32 %idx)
           to label %invoke.cont unwind label %lpad
 
 invoke.cont:                                      ; preds = %if.then
-  call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi  to i8*), i8* null) #1
+  call void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null) #1
   unreachable
 
 lpad:                                             ; preds = %if.then
-  %ll = landingpad { i8*, i32 }
+  %ll = landingpad { ptr, i32 }
           cleanup
   ret i32 %idx
 
@@ -63,22 +63,22 @@ if.end:                                           ; preds = %entry
 
 ; CHECK-LABEL: throwSmallException
 ; CHECK-NOT: invoke i32 @smallFunction
-define i32 @throwSmallException(i32 %idx, i32 %limit) #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @throwSmallException(i32 %idx, i32 %limit) #0 personality ptr @__gxx_personality_v0 {
 entry:
   %cmp = icmp sge i32 %idx, %limit
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %exception = call i8* @__cxa_allocate_exception(i64 1) #0
+  %exception = call ptr @__cxa_allocate_exception(i64 1) #0
   invoke i32 @smallFunction(i32 %idx)
           to label %invoke.cont unwind label %lpad
 
 invoke.cont:                                      ; preds = %if.then
-  call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi  to i8*), i8* null) #1
+  call void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null) #1
   unreachable
 
 lpad:                                             ; preds = %if.then
-  %ll = landingpad { i8*, i32 }
+  %ll = landingpad { ptr, i32 }
           cleanup
   ret i32 %idx
 
@@ -88,29 +88,29 @@ if.end:                                           ; preds = %entry
 
 define i32 @simpleFunction(i32 %a) #0 {
 entry:
-  %a1 = load volatile i32, i32* @a
+  %a1 = load volatile i32, ptr @a
   %x1 = add i32 %a1,  %a1
-  %a2 = load volatile i32, i32* @a
+  %a2 = load volatile i32, ptr @a
   %x2 = add i32 %x1, %a2
-  %a3 = load volatile i32, i32* @a
+  %a3 = load volatile i32, ptr @a
   %x3 = add i32 %x2, %a3
-  %a4 = load volatile i32, i32* @a
+  %a4 = load volatile i32, ptr @a
   %x4 = add i32 %x3, %a4
-  %a5 = load volatile i32, i32* @a
+  %a5 = load volatile i32, ptr @a
   %x5 = add i32 %x4, %a5
-  %a6 = load volatile i32, i32* @a
+  %a6 = load volatile i32, ptr @a
   %x6 = add i32 %x5, %a6
-  %a7 = load volatile i32, i32* @a
+  %a7 = load volatile i32, ptr @a
   %x7 = add i32 %x6, %a6
-  %a8 = load volatile i32, i32* @a
+  %a8 = load volatile i32, ptr @a
   %x8 = add i32 %x7, %a8
-  %a9 = load volatile i32, i32* @a
+  %a9 = load volatile i32, ptr @a
   %x9 = add i32 %x8, %a9
-  %a10 = load volatile i32, i32* @a
+  %a10 = load volatile i32, ptr @a
   %x10 = add i32 %x9, %a10
-  %a11 = load volatile i32, i32* @a
+  %a11 = load volatile i32, ptr @a
   %x11 = add i32 %x10, %a11
-  %a12 = load volatile i32, i32* @a
+  %a12 = load volatile i32, ptr @a
   %x12 = add i32 %x11, %a12
   %add = add i32 %x12, %a
   ret i32 %add
@@ -118,14 +118,14 @@ entry:
 
 define i32 @smallFunction(i32 %a) {
 entry:
-  %r = load volatile i32, i32* @a
+  %r = load volatile i32, ptr @a
   ret i32 %r
 }
 
 attributes #0 = { nounwind }
 attributes #1 = { noreturn }
 
-declare i8* @__cxa_allocate_exception(i64)
+declare ptr @__cxa_allocate_exception(i64)
 declare i32 @__gxx_personality_v0(...)
-declare void @__cxa_throw(i8*, i8*, i8*)
+declare void @__cxa_throw(ptr, ptr, ptr)
 

diff  --git a/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll b/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll
index d6913c0e8f9a9..12a328dfe5af3 100644
--- a/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll
+++ b/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll
@@ -166,29 +166,29 @@ entry:
   %a.addr = alloca i32, align 4
   %b.addr = alloca i32, align 4
   %i = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  store i32 %b, i32* %b.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  store i32 %b, ptr %b.addr, align 4
   br label %for.cond
 for.cond:
-  %0 = load i32, i32* %a.addr, align 4
-  %1 = load i32, i32* %b.addr, align 4
+  %0 = load i32, ptr %a.addr, align 4
+  %1 = load i32, ptr %b.addr, align 4
   %cmp = icmp slt i32 %0, %1
   br i1 %cmp, label %for.body, label %for.end
 for.body:
   br label %for.cond, !llvm.loop !2
 for.end:
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond1
 for.cond1:
-  %2 = load i32, i32* %i, align 4
+  %2 = load i32, ptr %i, align 4
   %cmp2 = icmp slt i32 %2, 10
   br i1 %cmp2, label %for.body3, label %for.end4
 for.body3:
   br label %for.inc
 for.inc:
-  %3 = load i32, i32* %i, align 4
+  %3 = load i32, ptr %i, align 4
   %inc = add nsw i32 %3, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond1, !llvm.loop !4
 for.end4:
   br label %while.body
@@ -207,58 +207,55 @@ define void @caller_multiple(i32 %a, i32 %b) #1 {
 ; CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
-; CHECK-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
+; CHECK-NEXT:    store i32 [[A]], ptr [[A_ADDR]], align 4
+; CHECK-NEXT:    store i32 [[B]], ptr [[B_ADDR]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND:%.*]]
 ; CHECK:       for.cond:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP1]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
 ; CHECK:       for.body:
 ; CHECK-NEXT:    br label [[FOR_COND]]
 ; CHECK:       for.end:
-; CHECK-NEXT:    store i32 0, i32* [[I]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[I]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND1:%.*]]
 ; CHECK:       for.cond1:
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[I]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[I]], align 4
 ; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP2]], 10
 ; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END4:%.*]]
 ; CHECK:       for.body3:
 ; CHECK-NEXT:    br label [[FOR_INC:%.*]]
 ; CHECK:       for.inc:
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[I]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[I]], align 4
 ; CHECK-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP3]], 1
-; CHECK-NEXT:    store i32 [[INC]], i32* [[I]], align 4
+; CHECK-NEXT:    store i32 [[INC]], ptr [[I]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND1]]
 ; CHECK:       for.end4:
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[A_ADDR_I]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP4]])
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i32* [[B_ADDR_I]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP5]])
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[I_I]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP6]])
-; CHECK-NEXT:    store i32 0, i32* [[A_ADDR_I]], align 4
-; CHECK-NEXT:    store i32 5, i32* [[B_ADDR_I]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[A_ADDR_I]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[B_ADDR_I]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[I_I]])
+; CHECK-NEXT:    store i32 0, ptr [[A_ADDR_I]], align 4
+; CHECK-NEXT:    store i32 5, ptr [[B_ADDR_I]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND_I:%.*]]
 ; CHECK:       for.cond.i:
-; CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A_ADDR_I]], align 4
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, i32* [[B_ADDR_I]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
 ; CHECK-NEXT:    [[CMP_I:%.*]] = icmp slt i32 [[TMP7]], [[TMP8]]
 ; CHECK-NEXT:    br i1 [[CMP_I]], label [[FOR_BODY_I:%.*]], label [[FOR_END_I:%.*]]
 ; CHECK:       for.body.i:
   ; CHECK-NEXT:    br label [[FOR_COND_I]], !llvm.loop [[LOOP2:![0-9]+]]
 ; CHECK:       for.end.i:
-; CHECK-NEXT:    store i32 0, i32* [[I_I]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[I_I]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND1_I:%.*]]
 ; CHECK:       for.cond1.i:
-; CHECK-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I_I]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[I_I]], align 4
 ; CHECK-NEXT:    [[CMP2_I:%.*]] = icmp slt i32 [[TMP9]], 10
 ; CHECK-NEXT:    br i1 [[CMP2_I]], label [[FOR_BODY3_I:%.*]], label [[FOR_END4_I:%.*]]
 ; CHECK:       for.body3.i:
-; CHECK-NEXT:    [[TMP10:%.*]] = load i32, i32* [[I_I]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr [[I_I]], align 4
 ; CHECK-NEXT:    [[INC_I:%.*]] = add nsw i32 [[TMP10]], 1
-; CHECK-NEXT:    store i32 [[INC_I]], i32* [[I_I]], align 4
+; CHECK-NEXT:    store i32 [[INC_I]], ptr [[I_I]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND1_I]], !llvm.loop [[LOOP3:![0-9]+]]
 ; CHECK:       for.end4.i:
 ; CHECK-NEXT:    br label [[WHILE_BODY_I:%.*]]
@@ -271,29 +268,29 @@ entry:
   %a.addr = alloca i32, align 4
   %b.addr = alloca i32, align 4
   %i = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  store i32 %b, i32* %b.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  store i32 %b, ptr %b.addr, align 4
   br label %for.cond
 for.cond:
-  %0 = load i32, i32* %a.addr, align 4
-  %1 = load i32, i32* %b.addr, align 4
+  %0 = load i32, ptr %a.addr, align 4
+  %1 = load i32, ptr %b.addr, align 4
   %cmp = icmp slt i32 %0, %1
   br i1 %cmp, label %for.body, label %for.end
 for.body:
   br label %for.cond
 for.end:
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond1
 for.cond1:
-  %2 = load i32, i32* %i, align 4
+  %2 = load i32, ptr %i, align 4
   %cmp2 = icmp slt i32 %2, 10
   br i1 %cmp2, label %for.body3, label %for.end4
 for.body3:
   br label %for.inc
 for.inc:
-  %3 = load i32, i32* %i, align 4
+  %3 = load i32, ptr %i, align 4
   %inc = add nsw i32 %3, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond1
 for.end4:
   call void @callee_multiple(i32 0, i32 5)
@@ -305,28 +302,28 @@ entry:
   %a.addr = alloca i32, align 4
   %b.addr = alloca i32, align 4
   %i = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  store i32 %b, i32* %b.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  store i32 %b, ptr %b.addr, align 4
   br label %for.cond
 for.cond:
-  %0 = load i32, i32* %a.addr, align 4
-  %1 = load i32, i32* %b.addr, align 4
+  %0 = load i32, ptr %a.addr, align 4
+  %1 = load i32, ptr %b.addr, align 4
   %cmp = icmp slt i32 %0, %1
   br i1 %cmp, label %for.body, label %for.end
 for.body:
   br label %for.cond, !llvm.loop !0
 for.end:
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond1
 for.cond1:
-  %2 = load i32, i32* %i, align 4
+  %2 = load i32, ptr %i, align 4
   %cmp2 = icmp slt i32 %2, 10
   br i1 %cmp2, label %for.body3, label %for.end8
 for.body3:
   br label %for.cond4
 for.cond4:
-  %3 = load i32, i32* %b.addr, align 4
-  %4 = load i32, i32* %a.addr, align 4
+  %3 = load i32, ptr %b.addr, align 4
+  %4 = load i32, ptr %a.addr, align 4
   %cmp5 = icmp slt i32 %3, %4
   br i1 %cmp5, label %for.body6, label %for.end7
 for.body6:
@@ -334,9 +331,9 @@ for.body6:
 for.end7:
   br label %for.inc
 for.inc:
-  %5 = load i32, i32* %i, align 4
+  %5 = load i32, ptr %i, align 4
   %inc = add nsw i32 %5, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond1, !llvm.loop !3
 for.end8:
   br label %while.body
@@ -356,26 +353,26 @@ define void @caller_nested(i32 %a, i32 %b) #1 {
 ; CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[I9:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
-; CHECK-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
+; CHECK-NEXT:    store i32 [[A]], ptr [[A_ADDR]], align 4
+; CHECK-NEXT:    store i32 [[B]], ptr [[B_ADDR]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND:%.*]]
 ; CHECK:       for.cond:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP1]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END8:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    store i32 0, i32* [[I]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[I]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND1:%.*]]
 ; CHECK:       for.cond1:
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[I]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[I]], align 4
 ; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP2]], 10
 ; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END7:%.*]]
 ; CHECK:       for.body3:
 ; CHECK-NEXT:    br label [[FOR_COND4:%.*]]
 ; CHECK:       for.cond4:
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[B_ADDR]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[B_ADDR]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4
 ; CHECK-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP3]], [[TMP4]]
 ; CHECK-NEXT:    br i1 [[CMP5]], label [[FOR_BODY6:%.*]], label [[FOR_END:%.*]]
 ; CHECK:       for.body6:
@@ -383,63 +380,60 @@ define void @caller_nested(i32 %a, i32 %b) #1 {
 ; CHECK:       for.end:
 ; CHECK-NEXT:    br label [[FOR_INC:%.*]]
 ; CHECK:       for.inc:
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[I]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[I]], align 4
 ; CHECK-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP5]], 1
-; CHECK-NEXT:    store i32 [[INC]], i32* [[I]], align 4
+; CHECK-NEXT:    store i32 [[INC]], ptr [[I]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND1]]
 ; CHECK:       for.end7:
 ; CHECK-NEXT:    br label [[FOR_COND]]
 ; CHECK:       for.end8:
-; CHECK-NEXT:    store i32 0, i32* [[I9]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[I9]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND10:%.*]]
 ; CHECK:       for.cond10:
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[I9]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[I9]], align 4
 ; CHECK-NEXT:    [[CMP11:%.*]] = icmp slt i32 [[TMP6]], 10
 ; CHECK-NEXT:    br i1 [[CMP11]], label [[FOR_BODY12:%.*]], label [[FOR_END15:%.*]]
 ; CHECK:       for.body12:
 ; CHECK-NEXT:    br label [[FOR_INC13:%.*]]
 ; CHECK:       for.inc13:
-; CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[I9]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr [[I9]], align 4
 ; CHECK-NEXT:    [[INC14:%.*]] = add nsw i32 [[TMP7]], 1
-; CHECK-NEXT:    store i32 [[INC14]], i32* [[I9]], align 4
+; CHECK-NEXT:    store i32 [[INC14]], ptr [[I9]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND10]]
 ; CHECK:       for.end15:
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[A_ADDR_I]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP8]])
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[B_ADDR_I]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP9]])
-; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[I_I]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP10]])
-; CHECK-NEXT:    store i32 0, i32* [[A_ADDR_I]], align 4
-; CHECK-NEXT:    store i32 5, i32* [[B_ADDR_I]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[A_ADDR_I]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[B_ADDR_I]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[I_I]])
+; CHECK-NEXT:    store i32 0, ptr [[A_ADDR_I]], align 4
+; CHECK-NEXT:    store i32 5, ptr [[B_ADDR_I]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND_I:%.*]]
 ; CHECK:       for.cond.i:
-; CHECK-NEXT:    [[TMP11:%.*]] = load i32, i32* [[A_ADDR_I]], align 4
-; CHECK-NEXT:    [[TMP12:%.*]] = load i32, i32* [[B_ADDR_I]], align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
 ; CHECK-NEXT:    [[CMP_I:%.*]] = icmp slt i32 [[TMP11]], [[TMP12]]
 ; CHECK-NEXT:    br i1 [[CMP_I]], label [[FOR_BODY_I:%.*]], label [[FOR_END_I:%.*]]
 ; CHECK:       for.body.i:
 ; CHECK-NEXT:    br label [[FOR_COND_I]], !llvm.loop [[LOOP0]]
 ; CHECK:       for.end.i:
-; CHECK-NEXT:    store i32 0, i32* [[I_I]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[I_I]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND1_I:%.*]]
 ; CHECK:       for.cond1.i:
-; CHECK-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I_I]], align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr [[I_I]], align 4
 ; CHECK-NEXT:    [[CMP2_I:%.*]] = icmp slt i32 [[TMP13]], 10
 ; CHECK-NEXT:    br i1 [[CMP2_I]], label [[FOR_BODY3_I:%.*]], label [[FOR_END8_I:%.*]]
 ; CHECK:       for.body3.i:
 ; CHECK-NEXT:    br label [[FOR_COND4_I:%.*]]
 ; CHECK:       for.cond4.i:
-; CHECK-NEXT:    [[TMP14:%.*]] = load i32, i32* [[B_ADDR_I]], align 4
-; CHECK-NEXT:    [[TMP15:%.*]] = load i32, i32* [[A_ADDR_I]], align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[B_ADDR_I]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = load i32, ptr [[A_ADDR_I]], align 4
 ; CHECK-NEXT:    [[CMP5_I:%.*]] = icmp slt i32 [[TMP14]], [[TMP15]]
 ; CHECK-NEXT:    br i1 [[CMP5_I]], label [[FOR_BODY6_I:%.*]], label [[FOR_END7_I:%.*]]
 ; CHECK:       for.body6.i:
 ; CHECK-NEXT:    br label [[FOR_COND4_I]], !llvm.loop [[LOOP2:![0-9]+]]
 ; CHECK:       for.end7.i:
-; CHECK-NEXT:    [[TMP16:%.*]] = load i32, i32* [[I_I]], align 4
+; CHECK-NEXT:    [[TMP16:%.*]] = load i32, ptr [[I_I]], align 4
 ; CHECK-NEXT:    [[INC_I:%.*]] = add nsw i32 [[TMP16]], 1
-; CHECK-NEXT:    store i32 [[INC_I]], i32* [[I_I]], align 4
+; CHECK-NEXT:    store i32 [[INC_I]], ptr [[I_I]], align 4
 ; CHECK-NEXT:    br label [[FOR_COND1_I]], !llvm.loop [[LOOP4:![0-9]+]]
 ; CHECK:       for.end8.i:
 ; CHECK-NEXT:    br label [[WHILE_BODY_I:%.*]]
@@ -453,26 +447,26 @@ entry:
   %b.addr = alloca i32, align 4
   %i = alloca i32, align 4
   %i9 = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  store i32 %b, i32* %b.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  store i32 %b, ptr %b.addr, align 4
   br label %for.cond
 for.cond:
-  %0 = load i32, i32* %a.addr, align 4
-  %1 = load i32, i32* %b.addr, align 4
+  %0 = load i32, ptr %a.addr, align 4
+  %1 = load i32, ptr %b.addr, align 4
   %cmp = icmp slt i32 %0, %1
   br i1 %cmp, label %for.body, label %for.end8
 for.body:
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond1
 for.cond1:
-  %2 = load i32, i32* %i, align 4
+  %2 = load i32, ptr %i, align 4
   %cmp2 = icmp slt i32 %2, 10
   br i1 %cmp2, label %for.body3, label %for.end7
 for.body3:
   br label %for.cond4
 for.cond4:
-  %3 = load i32, i32* %b.addr, align 4
-  %4 = load i32, i32* %a.addr, align 4
+  %3 = load i32, ptr %b.addr, align 4
+  %4 = load i32, ptr %a.addr, align 4
   %cmp5 = icmp slt i32 %3, %4
   br i1 %cmp5, label %for.body6, label %for.end
 for.body6:
@@ -480,25 +474,25 @@ for.body6:
 for.end:
   br label %for.inc
 for.inc:
-  %5 = load i32, i32* %i, align 4
+  %5 = load i32, ptr %i, align 4
   %inc = add nsw i32 %5, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond1
 for.end7:
   br label %for.cond
 for.end8:
-  store i32 0, i32* %i9, align 4
+  store i32 0, ptr %i9, align 4
   br label %for.cond10
 for.cond10:
-  %6 = load i32, i32* %i9, align 4
+  %6 = load i32, ptr %i9, align 4
   %cmp11 = icmp slt i32 %6, 10
   br i1 %cmp11, label %for.body12, label %for.end15
 for.body12:
   br label %for.inc13
 for.inc13:
-  %7 = load i32, i32* %i9, align 4
+  %7 = load i32, ptr %i9, align 4
   %inc14 = add nsw i32 %7, 1
-  store i32 %inc14, i32* %i9, align 4
+  store i32 %inc14, ptr %i9, align 4
   br label %for.cond10
 for.end15:
   call void @callee_nested(i32 0, i32 5)

diff --git a/llvm/test/Transforms/Inline/inlinedefault-threshold.ll b/llvm/test/Transforms/Inline/inlinedefault-threshold.ll
index 195316a9121f9..0a40debc2e9ff 100644
--- a/llvm/test/Transforms/Inline/inlinedefault-threshold.ll
+++ b/llvm/test/Transforms/Inline/inlinedefault-threshold.ll
@@ -7,63 +7,63 @@
 
 define i32 @inner() {
   call void @extern()
-  %a1 = load volatile i32, i32* @a
+  %a1 = load volatile i32, ptr @a
   %x1 = add i32 %a1,  %a1
   ret i32 %x1
 }
 
 define i32 @inner2() {
   call void @extern()
-  %a1 = load volatile i32, i32* @a
+  %a1 = load volatile i32, ptr @a
   %x1 = add i32 %a1,  %a1
-  %a2 = load volatile i32, i32* @a
+  %a2 = load volatile i32, ptr @a
   %x2 = add i32 %x1, %a2
-  %a3 = load volatile i32, i32* @a
+  %a3 = load volatile i32, ptr @a
   %x3 = add i32 %x2, %a3
-  %a4 = load volatile i32, i32* @a
+  %a4 = load volatile i32, ptr @a
   %x4 = add i32 %x3, %a4
-  %a5 = load volatile i32, i32* @a
+  %a5 = load volatile i32, ptr @a
   %x5 = add i32 %x3, %a5
-  %a6 = load volatile i32, i32* @a
+  %a6 = load volatile i32, ptr @a
   %x6 = add i32 %x5, %a6
-  %a7 = load volatile i32, i32* @a
+  %a7 = load volatile i32, ptr @a
   %x7 = add i32 %x6, %a7
-  %a8 = load volatile i32, i32* @a
+  %a8 = load volatile i32, ptr @a
   %x8 = add i32 %x7, %a8
   ret i32 %x8
 }
 
 define i32 @inner3() {
   call void @extern()
-  %a1 = load volatile i32, i32* @a
+  %a1 = load volatile i32, ptr @a
   %x1 = add i32 %a1,  %a1
-  %a2 = load volatile i32, i32* @a
+  %a2 = load volatile i32, ptr @a
   %x2 = add i32 %x1, %a2
-  %a3 = load volatile i32, i32* @a
+  %a3 = load volatile i32, ptr @a
   %x3 = add i32 %x2, %a3
-  %a4 = load volatile i32, i32* @a
+  %a4 = load volatile i32, ptr @a
   %x4 = add i32 %x3, %a4
-  %a5 = load volatile i32, i32* @a
+  %a5 = load volatile i32, ptr @a
   %x5 = add i32 %x4, %a5
-  %a6 = load volatile i32, i32* @a
+  %a6 = load volatile i32, ptr @a
   %x6 = add i32 %x5, %a6
-  %a7 = load volatile i32, i32* @a
+  %a7 = load volatile i32, ptr @a
   %x7 = add i32 %x6, %a7
-  %a8 = load volatile i32, i32* @a
+  %a8 = load volatile i32, ptr @a
   %x8 = add i32 %x7, %a8
-  %a9 = load volatile i32, i32* @a
+  %a9 = load volatile i32, ptr @a
   %x9 = add i32 %x8, %a9
-  %a10 = load volatile i32, i32* @a
+  %a10 = load volatile i32, ptr @a
   %x10 = add i32 %x9, %a10
-  %a11 = load volatile i32, i32* @a
+  %a11 = load volatile i32, ptr @a
   %x11 = add i32 %x10, %a11
-  %a12 = load volatile i32, i32* @a
+  %a12 = load volatile i32, ptr @a
   %x12 = add i32 %x11, %a12
-  %a13 = load volatile i32, i32* @a
+  %a13 = load volatile i32, ptr @a
   %x13 = add i32 %x12, %a13
-  %a14 = load volatile i32, i32* @a
+  %a14 = load volatile i32, ptr @a
   %x14 = add i32 %x13, %a14
-  %a15 = load volatile i32, i32* @a
+  %a15 = load volatile i32, ptr @a
   %x15 = add i32 %x14, %a15
   ret i32 %x15
 }

diff --git a/llvm/test/Transforms/Inline/invariant-group-sroa.ll b/llvm/test/Transforms/Inline/invariant-group-sroa.ll
index 086e8b6c04bf9..4842cf01c9cf6 100644
--- a/llvm/test/Transforms/Inline/invariant-group-sroa.ll
+++ b/llvm/test/Transforms/Inline/invariant-group-sroa.ll
@@ -3,24 +3,22 @@
 ; SROA analysis should yield non-zero savings for allocas passed through invariant group intrinsics
 ; CHECK: SROACostSavings: 10
 
-declare i8* @llvm.launder.invariant.group.p0i8(i8*)
-declare i8* @llvm.strip.invariant.group.p0i8(i8*)
+declare ptr @llvm.launder.invariant.group.p0(ptr)
+declare ptr @llvm.strip.invariant.group.p0(ptr)
 
 declare void @b()
 
 define i32 @f() {
   %a = alloca i32
-  %r = call i32 @g(i32* %a)
+  %r = call i32 @g(ptr %a)
   ret i32 %r
 }
 
-define i32 @g(i32* %a) {
-  %a_i8 = bitcast i32* %a to i8*
-  %a_inv_i8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a_i8)
-  %a_inv = bitcast i8* %a_inv_i8 to i32*
-  %i1 = load i32, i32* %a_inv
-  %i2 = load i32, i32* %a_inv
+define i32 @g(ptr %a) {
+  %a_inv_i8 = call ptr @llvm.launder.invariant.group.p0(ptr %a)
+  %i1 = load i32, ptr %a_inv_i8
+  %i2 = load i32, ptr %a_inv_i8
   %i3 = add i32 %i1, %i2
-  %t = call i8* @llvm.strip.invariant.group.p0i8(i8* %a_inv_i8)
+  %t = call ptr @llvm.strip.invariant.group.p0(ptr %a_inv_i8)
   ret i32 %i3
 }

diff --git a/llvm/test/Transforms/Inline/invoke-cleanup.ll b/llvm/test/Transforms/Inline/invoke-cleanup.ll
index bc9385b9a8f6e..9e0344b5b08a4 100644
--- a/llvm/test/Transforms/Inline/invoke-cleanup.ll
+++ b/llvm/test/Transforms/Inline/invoke-cleanup.ll
@@ -8,14 +8,14 @@ declare void @external_func()
 @exception_type2 = external global i8
 
 
-define internal void @inner() personality i8* null {
+define internal void @inner() personality ptr null {
   invoke void @external_func()
       to label %cont unwind label %lpad
 cont:
   ret void
 lpad:
   %lp = landingpad i32
-      catch i8* @exception_type1
+      catch ptr @exception_type1
   resume i32 %lp
 }
 
@@ -23,7 +23,7 @@ lpad:
 ; this call site (PR17872), otherwise C++ destructors will not be
 ; called when they should be.
 
-define void @outer() personality i8* null {
+define void @outer() personality ptr null {
   invoke void @inner()
       to label %cont unwind label %lpad
 cont:
@@ -31,11 +31,11 @@ cont:
 lpad:
   %lp = landingpad i32
       cleanup
-      catch i8* @exception_type2
+      catch ptr @exception_type2
   resume i32 %lp
 }
 ; CHECK: define void @outer
 ; CHECK: landingpad
 ; CHECK-NEXT: cleanup
-; CHECK-NEXT: catch i8* @exception_type1
-; CHECK-NEXT: catch i8* @exception_type2
+; CHECK-NEXT: catch ptr @exception_type1
+; CHECK-NEXT: catch ptr @exception_type2

diff --git a/llvm/test/Transforms/Inline/invoke-combine-clauses.ll b/llvm/test/Transforms/Inline/invoke-combine-clauses.ll
index e5614736a63bb..ec1dafe8b68a6 100644
--- a/llvm/test/Transforms/Inline/invoke-combine-clauses.ll
+++ b/llvm/test/Transforms/Inline/invoke-combine-clauses.ll
@@ -10,18 +10,18 @@ declare void @abort()
 
 
 ; Check for a bug in which multiple "resume" instructions in the
-; inlined function caused "catch i8* @exception_outer" to appear
+; inlined function caused "catch ptr @exception_outer" to appear
 ; multiple times in the resulting landingpad.
 
-define internal void @inner_multiple_resume() personality i8* null {
+define internal void @inner_multiple_resume() personality ptr null {
   invoke void @external_func()
       to label %cont unwind label %lpad
 cont:
   ret void
 lpad:
   %lp = landingpad i32
-      catch i8* @exception_inner
-  %cond = load i1, i1* @condition
+      catch ptr @exception_inner
+  %cond = load i1, ptr @condition
   br i1 %cond, label %resume1, label %resume2
 resume1:
   resume i32 1
@@ -29,29 +29,29 @@ resume2:
   resume i32 2
 }
 
-define void @outer_multiple_resume() personality i8* null {
+define void @outer_multiple_resume() personality ptr null {
   invoke void @inner_multiple_resume()
       to label %cont unwind label %lpad
 cont:
   ret void
 lpad:
   %lp = landingpad i32
-      catch i8* @exception_outer
+      catch ptr @exception_outer
   resume i32 %lp
 }
 ; CHECK: define void @outer_multiple_resume()
 ; CHECK: %lp.i = landingpad
-; CHECK-NEXT: catch i8* @exception_inner
-; CHECK-NEXT: catch i8* @exception_outer
+; CHECK-NEXT: catch ptr @exception_inner
+; CHECK-NEXT: catch ptr @exception_outer
 ; Check that there isn't another "catch" clause:
 ; CHECK-NEXT: load
 
 
 ; Check for a bug in which having a "resume" and a "call" in the
-; inlined function caused "catch i8* @exception_outer" to appear
+; inlined function caused "catch ptr @exception_outer" to appear
 ; multiple times in the resulting landingpad.
 
-define internal void @inner_resume_and_call() personality i8* null {
+define internal void @inner_resume_and_call() personality ptr null {
   call void @external_func()
   invoke void @external_func()
       to label %cont unwind label %lpad
@@ -59,60 +59,60 @@ cont:
   ret void
 lpad:
   %lp = landingpad i32
-      catch i8* @exception_inner
+      catch ptr @exception_inner
   resume i32 %lp
 }
 
-define void @outer_resume_and_call() personality i8* null {
+define void @outer_resume_and_call() personality ptr null {
   invoke void @inner_resume_and_call()
       to label %cont unwind label %lpad
 cont:
   ret void
 lpad:
   %lp = landingpad i32
-      catch i8* @exception_outer
+      catch ptr @exception_outer
   resume i32 %lp
 }
 ; CHECK: define void @outer_resume_and_call()
 ; CHECK: %lp.i = landingpad
-; CHECK-NEXT: catch i8* @exception_inner
-; CHECK-NEXT: catch i8* @exception_outer
+; CHECK-NEXT: catch ptr @exception_inner
+; CHECK-NEXT: catch ptr @exception_outer
 ; Check that there isn't another "catch" clause:
 ; CHECK-NEXT: br
 
 
 ; Check what happens if the inlined function contains an "invoke" but
 ; no "resume".  In this case, the inlined landingpad does not need to
-; include the "catch i8* @exception_outer" clause from the outer
+; include the "catch ptr @exception_outer" clause from the outer
 ; function (since the outer function's landingpad will not be
 ; reachable), but it's OK to include this clause.
 
-define internal void @inner_no_resume_or_call() personality i8* null {
+define internal void @inner_no_resume_or_call() personality ptr null {
   invoke void @external_func()
       to label %cont unwind label %lpad
 cont:
   ret void
 lpad:
   %lp = landingpad i32
-      catch i8* @exception_inner
+      catch ptr @exception_inner
   ; A landingpad might have no "resume" if a C++ destructor aborts.
   call void @abort() noreturn nounwind
   unreachable
 }
 
-define void @outer_no_resume_or_call() personality i8* null {
+define void @outer_no_resume_or_call() personality ptr null {
   invoke void @inner_no_resume_or_call()
       to label %cont unwind label %lpad
 cont:
   ret void
 lpad:
   %lp = landingpad i32
-      catch i8* @exception_outer
+      catch ptr @exception_outer
   resume i32 %lp
 }
 ; CHECK: define void @outer_no_resume_or_call()
 ; CHECK: %lp.i = landingpad
-; CHECK-NEXT: catch i8* @exception_inner
-; CHECK-NEXT: catch i8* @exception_outer
+; CHECK-NEXT: catch ptr @exception_inner
+; CHECK-NEXT: catch ptr @exception_outer
 ; Check that there isn't another "catch" clause:
 ; CHECK-NEXT: call void @abort()

diff --git a/llvm/test/Transforms/Inline/invoke-cost.ll b/llvm/test/Transforms/Inline/invoke-cost.ll
index aff59671d76d0..1159603c62f32 100644
--- a/llvm/test/Transforms/Inline/invoke-cost.ll
+++ b/llvm/test/Transforms/Inline/invoke-cost.ll
@@ -7,11 +7,11 @@ target datalayout = "p:32:32"
 
 declare void @f()
 declare i32 @__gxx_personality_v0(...)
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 declare void @__cxa_end_catch()
 declare void @_ZSt9terminatev()
 
-define void @inner1() personality i32 (...)* @__gxx_personality_v0 {
+define void @inner1() personality ptr @__gxx_personality_v0 {
 entry:
   invoke void @f() to label %cont1 unwind label %terminate.lpad
 
@@ -28,8 +28,8 @@ cont4:
   ret void
 
 terminate.lpad:
-  landingpad {i8*, i32}
-            catch i8* null
+  landingpad {ptr, i32}
+            catch ptr null
   call void @_ZSt9terminatev() noreturn nounwind
   unreachable
 }

diff --git a/llvm/test/Transforms/Inline/invoke_test-1.ll b/llvm/test/Transforms/Inline/invoke_test-1.ll
index 55d6a2befeaff..c09e7aa0fe0a4 100644
--- a/llvm/test/Transforms/Inline/invoke_test-1.ll
+++ b/llvm/test/Transforms/Inline/invoke_test-1.ll
@@ -14,8 +14,8 @@ entry:
 }
 
 ; caller returns true if might_throw throws an exception...
-define i32 @caller() personality i32 (...)* @__gxx_personality_v0 {
-; CHECK-LABEL: define i32 @caller() personality i32 (...)* @__gxx_personality_v0
+define i32 @caller() personality ptr @__gxx_personality_v0 {
+; CHECK-LABEL: define i32 @caller() personality ptr @__gxx_personality_v0
 entry:
   invoke void @callee()
       to label %cont unwind label %exc
@@ -26,7 +26,7 @@ cont:
   ret i32 0
 
 exc:
-  %exn = landingpad {i8*, i32}
+  %exn = landingpad {ptr, i32}
          cleanup
   ret i32 1
 }

diff --git a/llvm/test/Transforms/Inline/invoke_test-2.ll b/llvm/test/Transforms/Inline/invoke_test-2.ll
index 6282ff96cd0c2..2038dcb1b3c98 100644
--- a/llvm/test/Transforms/Inline/invoke_test-2.ll
+++ b/llvm/test/Transforms/Inline/invoke_test-2.ll
@@ -5,7 +5,7 @@
 
 declare void @might_throw()
 
-define internal i32 @callee() personality i32 (...)* @__gxx_personality_v0 {
+define internal i32 @callee() personality ptr @__gxx_personality_v0 {
 enrty:
   invoke void @might_throw()
       to label %cont unwind label %exc
@@ -14,14 +14,14 @@ cont:
   ret i32 0
 
 exc:
-  %exn = landingpad {i8*, i32}
+  %exn = landingpad {ptr, i32}
          cleanup
   ret i32 1
 }
 
 ; caller returns true if might_throw throws an exception... callee cannot throw.
-define i32 @caller() personality i32 (...)* @__gxx_personality_v0 {
-; CHECK-LABEL: define i32 @caller() personality i32 (...)* @__gxx_personality_v0
+define i32 @caller() personality ptr @__gxx_personality_v0 {
+; CHECK-LABEL: define i32 @caller() personality ptr @__gxx_personality_v0
 enrty:
   %X = invoke i32 @callee()
            to label %cont unwind label %UnreachableExceptionHandler
@@ -42,7 +42,7 @@ cont:
 
 UnreachableExceptionHandler:
 ; CHECK-NOT: UnreachableExceptionHandler:
-  %exn = landingpad {i8*, i32}
+  %exn = landingpad {ptr, i32}
          cleanup
   ret i32 -1
 ; CHECK-NOT: ret i32 -1

diff --git a/llvm/test/Transforms/Inline/invoke_test-3.ll b/llvm/test/Transforms/Inline/invoke_test-3.ll
index b7d2f91888838..3bf8bea82660f 100644
--- a/llvm/test/Transforms/Inline/invoke_test-3.ll
+++ b/llvm/test/Transforms/Inline/invoke_test-3.ll
@@ -7,7 +7,7 @@
 
 declare void @might_throw()
 
-define internal i32 @callee() personality i32 (...)* @__gxx_personality_v0 {
+define internal i32 @callee() personality ptr @__gxx_personality_v0 {
 entry:
   invoke void @might_throw()
       to label %cont unwind label %exc
@@ -17,14 +17,14 @@ cont:
 
 exc:
  ; This just rethrows the exception!
-  %exn = landingpad {i8*, i32}
+  %exn = landingpad {ptr, i32}
          cleanup
-  resume { i8*, i32 } %exn
+  resume { ptr, i32 } %exn
 }
 
 ; caller returns true if might_throw throws an exception... which gets
 ; propagated by callee.
-define i32 @caller() personality i32 (...)* @__gxx_personality_v0 {
+define i32 @caller() personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: define i32 @caller()
 entry:
   %X = invoke i32 @callee()
@@ -41,7 +41,7 @@ cont:
 
 Handler:
 ; This consumes an exception thrown by might_throw
-  %exn = landingpad {i8*, i32}
+  %exn = landingpad {ptr, i32}
          cleanup
   ret i32 1
 }

diff --git a/llvm/test/Transforms/Inline/label-annotation.ll b/llvm/test/Transforms/Inline/label-annotation.ll
index bed3c8320d841..c94dc07562e42 100644
--- a/llvm/test/Transforms/Inline/label-annotation.ll
+++ b/llvm/test/Transforms/Inline/label-annotation.ll
@@ -9,7 +9,7 @@ declare void @llvm.codeview.annotation(metadata)
 
 define void @inlinee() {
 entry:
-  store i32 42, i32* @the_global
+  store i32 42, ptr @the_global
   call void @llvm.codeview.annotation(metadata !0)
   ret void
 }
@@ -23,7 +23,7 @@ entry:
 !0 = !{!"annotation"}
 
 ; CHECK-LABEL: define void @inlinee()
-; CHECK: store i32 42, i32* @the_global
+; CHECK: store i32 42, ptr @the_global
 ; CHECK: call void @llvm.codeview.annotation(metadata !0)
 ; CHECK: ret void
 

diff --git a/llvm/test/Transforms/Inline/last-callsite.ll b/llvm/test/Transforms/Inline/last-callsite.ll
index 039f996d180d7..a50e4afe3be0a 100644
--- a/llvm/test/Transforms/Inline/last-callsite.ll
+++ b/llvm/test/Transforms/Inline/last-callsite.ll
@@ -7,14 +7,14 @@
 define internal void @test1_f() {
 entry:
   %p = alloca i32
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
   ret void
 }
 
@@ -24,14 +24,14 @@ entry:
 define internal void @test1_g() {
 entry:
   %p = alloca i32
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
   ret void
 }
 
@@ -61,14 +61,14 @@ entry:
   br i1 %b, label %then, label %exit
 
 then:
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
   br label %exit
 
 exit:
@@ -84,14 +84,14 @@ entry:
   br i1 %b, label %then, label %exit
 
 then:
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
   br label %exit
 
 exit:
@@ -132,14 +132,14 @@ entry:
   br i1 %b, label %then, label %exit
 
 then:
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
   br label %exit
 
 exit:
@@ -155,14 +155,14 @@ entry:
   br i1 %b, label %then, label %exit
 
 then:
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
   br label %exit
 
 exit:
@@ -202,14 +202,14 @@ entry:
   br i1 %b, label %then, label %exit
 
 then:
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
   br label %exit
 
 exit:
@@ -225,14 +225,14 @@ entry:
   br i1 %b, label %then, label %exit
 
 then:
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
+  store volatile i32 0, ptr %p
   br label %exit
 
 exit:
@@ -247,7 +247,7 @@ entry:
   ; constant expression. Merely inlining and deleting the call isn't enough to
   ; drop the use count here, we need to GC the dead constant expression as
   ; well.
-  call void @test4_f(i1 icmp ne (i64 ptrtoint (void (i1)* @test4_f to i64), i64 ptrtoint(void (i1)* @test4_f to i64)))
+  call void @test4_f(i1 icmp ne (i64 ptrtoint (ptr @test4_f to i64), i64 ptrtoint(ptr @test4_f to i64)))
 ; CHECK-NOT: @test4_f
 
   ; The second call is too expensive to inline unless we update the number of
@@ -260,9 +260,9 @@ entry:
   ; a second use. If this part starts failing we need to use more complex
   ; constant expressions to reference a particular function with them.
   %sink = alloca i64
-  store volatile i64 mul (i64 ptrtoint (void (i1)* @test4_g to i64), i64 ptrtoint(void (i1)* @test4_g to i64)), i64* %sink
+  store volatile i64 mul (i64 ptrtoint (ptr @test4_g to i64), i64 ptrtoint(ptr @test4_g to i64)), ptr %sink
   call void @test4_g(i1 true)
-; CHECK: store volatile i64 mul (i64 ptrtoint (void (i1)* @test4_g to i64), i64 ptrtoint (void (i1)* @test4_g to i64)), i64* %sink
+; CHECK: store volatile i64 mul (i64 ptrtoint (ptr @test4_g to i64), i64 ptrtoint (ptr @test4_g to i64)), ptr %sink
 ; CHECK: call void @test4_g(i1 true)
 
   ret void

diff --git a/llvm/test/Transforms/Inline/launder.invariant.group.ll b/llvm/test/Transforms/Inline/launder.invariant.group.ll
index ccd722f4d88c2..71df796513697 100644
--- a/llvm/test/Transforms/Inline/launder.invariant.group.ll
+++ b/llvm/test/Transforms/Inline/launder.invariant.group.ll
@@ -2,58 +2,56 @@
 ; RUN: opt -S -O3 < %s | FileCheck %s
 ; RUN: opt -S -passes=inline -inline-threshold=1 < %s | FileCheck %s
 
-%struct.A = type <{ i32 (...)**, i32, [4 x i8] }>
+%struct.A = type <{ ptr, i32, [4 x i8] }>
 
 ; This test checks if value returned from the launder is considered aliasing
 ; with its argument.  Due to bug caused by handling launder in capture tracking
 ; sometimes it would be considered noalias.
-; CHECK-LABEL: define i32 @bar(%struct.A* noalias
-define i32 @bar(%struct.A* noalias) {
+; CHECK-LABEL: define i32 @bar(ptr noalias
+define i32 @bar(ptr noalias) {
 ; CHECK-NOT: noalias
-  %2 = bitcast %struct.A* %0 to i8*
-  %3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %2)
-  %4 = getelementptr inbounds i8, i8* %3, i64 8
-  %5 = bitcast i8* %4 to i32*
-  store i32 42, i32* %5, align 8
-  %6 = getelementptr inbounds %struct.A, %struct.A* %0, i64 0, i32 1
-  %7 = load i32, i32* %6, align 8
-  ret i32 %7
+  %2 = call ptr @llvm.launder.invariant.group.p0(ptr %0)
+  %3 = getelementptr inbounds i8, ptr %2, i64 8
+  store i32 42, ptr %3, align 8
+  %4 = getelementptr inbounds %struct.A, ptr %0, i64 0, i32 1
+  %5 = load i32, ptr %4, align 8
+  ret i32 %5
 }
 
-; CHECK-LABEL: define i32 @foo(%struct.A* noalias
-define i32 @foo(%struct.A* noalias)  {
+; CHECK-LABEL: define i32 @foo(ptr noalias
+define i32 @foo(ptr noalias)  {
   ; CHECK-NOT: call i32 @bar(
   ; CHECK-NOT: !noalias
-  %2 = tail call i32 @bar(%struct.A* %0)
+  %2 = tail call i32 @bar(ptr %0)
   ret i32 %2
 }
 
 
 ; This test checks if invariant group intrinsics have zero cost for inlining.
-; CHECK-LABEL: define i8* @caller(i8*
-define i8* @caller(i8* %p) {
-; CHECK-NOT: call i8* @lot_of_launders_and_strips
-  %a1 = call i8* @lot_of_launders_and_strips(i8* %p)
-  %a2 = call i8* @lot_of_launders_and_strips(i8* %a1)
-  %a3 = call i8* @lot_of_launders_and_strips(i8* %a2)
-  %a4 = call i8* @lot_of_launders_and_strips(i8* %a3)
-  ret i8* %a4
+; CHECK-LABEL: define ptr @caller(ptr
+define ptr @caller(ptr %p) {
+; CHECK-NOT: call ptr @lot_of_launders_and_strips
+  %a1 = call ptr @lot_of_launders_and_strips(ptr %p)
+  %a2 = call ptr @lot_of_launders_and_strips(ptr %a1)
+  %a3 = call ptr @lot_of_launders_and_strips(ptr %a2)
+  %a4 = call ptr @lot_of_launders_and_strips(ptr %a3)
+  ret ptr %a4
 }
 
-define i8* @lot_of_launders_and_strips(i8* %p) {
-  %a1 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
-  %a2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a1)
-  %a3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a2)
-  %a4 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a3)
+define ptr @lot_of_launders_and_strips(ptr %p) {
+  %a1 = call ptr @llvm.launder.invariant.group.p0(ptr %p)
+  %a2 = call ptr @llvm.launder.invariant.group.p0(ptr %a1)
+  %a3 = call ptr @llvm.launder.invariant.group.p0(ptr %a2)
+  %a4 = call ptr @llvm.launder.invariant.group.p0(ptr %a3)
 
-  %s1 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a4)
-  %s2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %s1)
-  %s3 = call i8* @llvm.strip.invariant.group.p0i8(i8* %s2)
-  %s4 = call i8* @llvm.strip.invariant.group.p0i8(i8* %s3)
+  %s1 = call ptr @llvm.strip.invariant.group.p0(ptr %a4)
+  %s2 = call ptr @llvm.strip.invariant.group.p0(ptr %s1)
+  %s3 = call ptr @llvm.strip.invariant.group.p0(ptr %s2)
+  %s4 = call ptr @llvm.strip.invariant.group.p0(ptr %s3)
 
-   ret i8* %s4
+   ret ptr %s4
 }
 
 
-declare i8* @llvm.launder.invariant.group.p0i8(i8*)
-declare i8* @llvm.strip.invariant.group.p0i8(i8*)
+declare ptr @llvm.launder.invariant.group.p0(ptr)
+declare ptr @llvm.strip.invariant.group.p0(ptr)

diff  --git a/llvm/test/Transforms/Inline/memprof_inline.ll b/llvm/test/Transforms/Inline/memprof_inline.ll
index c0300e07971a5..20b914d56df75 100644
--- a/llvm/test/Transforms/Inline/memprof_inline.ll
+++ b/llvm/test/Transforms/Inline/memprof_inline.ll
@@ -36,82 +36,82 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
 target triple = "x86_64-unknown-linux-gnu"
 
 ; Function Attrs: mustprogress uwtable
-; CHECK-LABEL: define dso_local noundef i8* @_Z3foov
-define dso_local noundef i8* @_Z3foov() #0 !dbg !39 {
+; CHECK-LABEL: define dso_local noundef ptr @_Z3foov
+define dso_local noundef ptr @_Z3foov() #0 !dbg !39 {
 entry:
   ; CHECK: call {{.*}} @_Znam
   ; CHECK-NOT: !memprof
   ; CHECK-NOT: !callsite
-  %call = call noalias noundef nonnull i8* @_Znam(i64 noundef 10) #6, !dbg !42, !memprof !43, !callsite !50
+  %call = call noalias noundef nonnull ptr @_Znam(i64 noundef 10) #6, !dbg !42, !memprof !43, !callsite !50
   ; CHECK-NEXT: ret
-  ret i8* %call, !dbg !51
+  ret ptr %call, !dbg !51
 }
 
 ; Function Attrs: nobuiltin allocsize(0)
-declare noundef nonnull i8* @_Znam(i64 noundef) #1
+declare noundef nonnull ptr @_Znam(i64 noundef) #1
 
 ; Function Attrs: mustprogress uwtable
-; CHECK-LABEL: define dso_local noundef i8* @_Z4foo2v
-define dso_local noundef i8* @_Z4foo2v() #0 !dbg !52 {
+; CHECK-LABEL: define dso_local noundef ptr @_Z4foo2v
+define dso_local noundef ptr @_Z4foo2v() #0 !dbg !52 {
 entry:
   ; CHECK: call {{.*}} @_Znam{{.*}} #[[COLD:[0-9]+]]
-  %call = call noundef i8* @_Z3foov(), !dbg !53, !callsite !54
-  ret i8* %call, !dbg !55
+  %call = call noundef ptr @_Z3foov(), !dbg !53, !callsite !54
+  ret ptr %call, !dbg !55
 }
 
 ; Function Attrs: mustprogress norecurse uwtable
 ; CHECK-LABEL: define dso_local noundef i32 @main
-define dso_local noundef i32 @main(i32 noundef %argc, i8** noundef %argv) #2 !dbg !56 {
+define dso_local noundef i32 @main(i32 noundef %argc, ptr noundef %argv) #2 !dbg !56 {
 entry:
   %retval = alloca i32, align 4
   %argc.addr = alloca i32, align 4
-  %argv.addr = alloca i8**, align 8
-  %c = alloca i8*, align 8
-  %d = alloca i8*, align 8
-  %e = alloca i8*, align 8
-  store i32 0, i32* %retval, align 4
-  store i32 %argc, i32* %argc.addr, align 4
-  store i8** %argv, i8*** %argv.addr, align 8
+  %argv.addr = alloca ptr, align 8
+  %c = alloca ptr, align 8
+  %d = alloca ptr, align 8
+  %e = alloca ptr, align 8
+  store i32 0, ptr %retval, align 4
+  store i32 %argc, ptr %argc.addr, align 4
+  store ptr %argv, ptr %argv.addr, align 8
   ; CHECK: call {{.*}} @_Znam{{.*}} #[[NOTCOLD:[0-9]+]]
-  %call = call noundef i8* @_Z3foov(), !dbg !57, !callsite !58
-  store i8* %call, i8** %c, align 8, !dbg !59
+  %call = call noundef ptr @_Z3foov(), !dbg !57, !callsite !58
+  store ptr %call, ptr %c, align 8, !dbg !59
   ; CHECK: call {{.*}} @_Znam{{.*}} #[[COLD]]
-  %call1 = call noundef i8* @_Z3foov(), !dbg !60, !callsite !61
-  store i8* %call1, i8** %d, align 8, !dbg !62
+  %call1 = call noundef ptr @_Z3foov(), !dbg !60, !callsite !61
+  store ptr %call1, ptr %d, align 8, !dbg !62
   ; CHECK: call {{.*}} @_Znam{{.*}} #[[COLD]]
-  %call2 = call noundef i8* @_Z4foo2v(), !dbg !63, !callsite !64
-  store i8* %call2, i8** %e, align 8, !dbg !65
-  %0 = load i8*, i8** %c, align 8, !dbg !66
-  call void @llvm.memset.p0i8.i64(i8* align 1 %0, i8 0, i64 10, i1 false), !dbg !67
-  %1 = load i8*, i8** %d, align 8, !dbg !68
-  call void @llvm.memset.p0i8.i64(i8* align 1 %1, i8 0, i64 10, i1 false), !dbg !69
-  %2 = load i8*, i8** %e, align 8, !dbg !70
-  call void @llvm.memset.p0i8.i64(i8* align 1 %2, i8 0, i64 10, i1 false), !dbg !71
-  %3 = load i8*, i8** %c, align 8, !dbg !72
-  %isnull = icmp eq i8* %3, null, !dbg !73
+  %call2 = call noundef ptr @_Z4foo2v(), !dbg !63, !callsite !64
+  store ptr %call2, ptr %e, align 8, !dbg !65
+  %0 = load ptr, ptr %c, align 8, !dbg !66
+  call void @llvm.memset.p0.i64(ptr align 1 %0, i8 0, i64 10, i1 false), !dbg !67
+  %1 = load ptr, ptr %d, align 8, !dbg !68
+  call void @llvm.memset.p0.i64(ptr align 1 %1, i8 0, i64 10, i1 false), !dbg !69
+  %2 = load ptr, ptr %e, align 8, !dbg !70
+  call void @llvm.memset.p0.i64(ptr align 1 %2, i8 0, i64 10, i1 false), !dbg !71
+  %3 = load ptr, ptr %c, align 8, !dbg !72
+  %isnull = icmp eq ptr %3, null, !dbg !73
   br i1 %isnull, label %delete.end, label %delete.notnull, !dbg !73
 
 delete.notnull:                                   ; preds = %entry
-  call void @_ZdaPv(i8* noundef %3) #7, !dbg !74
+  call void @_ZdaPv(ptr noundef %3) #7, !dbg !74
   br label %delete.end, !dbg !74
 
 delete.end:                                       ; preds = %delete.notnull, %entry
   %call4 = call i32 @sleep(i32 noundef 200), !dbg !76
-  %4 = load i8*, i8** %d, align 8, !dbg !77
-  %isnull5 = icmp eq i8* %4, null, !dbg !78
+  %4 = load ptr, ptr %d, align 8, !dbg !77
+  %isnull5 = icmp eq ptr %4, null, !dbg !78
   br i1 %isnull5, label %delete.end7, label %delete.notnull6, !dbg !78
 
 delete.notnull6:                                  ; preds = %delete.end
-  call void @_ZdaPv(i8* noundef %4) #7, !dbg !79
+  call void @_ZdaPv(ptr noundef %4) #7, !dbg !79
   br label %delete.end7, !dbg !79
 
 delete.end7:                                      ; preds = %delete.notnull6, %delete.end
-  %5 = load i8*, i8** %e, align 8, !dbg !80
-  %isnull8 = icmp eq i8* %5, null, !dbg !81
+  %5 = load ptr, ptr %e, align 8, !dbg !80
+  %isnull8 = icmp eq ptr %5, null, !dbg !81
   br i1 %isnull8, label %delete.end10, label %delete.notnull9, !dbg !81
 
 delete.notnull9:                                  ; preds = %delete.end7
-  call void @_ZdaPv(i8* noundef %5) #7, !dbg !82
+  call void @_ZdaPv(ptr noundef %5) #7, !dbg !82
   br label %delete.end10, !dbg !82
 
 delete.end10:                                     ; preds = %delete.notnull9, %delete.end7
@@ -119,10 +119,10 @@ delete.end10:                                     ; preds = %delete.notnull9, %d
 }
 
 ; Function Attrs: argmemonly nofree nounwind willreturn writeonly
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #3
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #3
 
 ; Function Attrs: nobuiltin nounwind
-declare void @_ZdaPv(i8* noundef) #4
+declare void @_ZdaPv(ptr noundef) #4
 
 declare i32 @sleep(i32 noundef) #5
 

diff  --git a/llvm/test/Transforms/Inline/memprof_inline2.ll b/llvm/test/Transforms/Inline/memprof_inline2.ll
index edd722d361281..4d9e2cb03e14a 100644
--- a/llvm/test/Transforms/Inline/memprof_inline2.ll
+++ b/llvm/test/Transforms/Inline/memprof_inline2.ll
@@ -45,124 +45,124 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
 target triple = "x86_64-unknown-linux-gnu"
 
 ; Function Attrs: mustprogress uwtable
-; CHECK-LABEL: define dso_local noundef i8* @_Z3foov
-define dso_local noundef i8* @_Z3foov() #0 !dbg !39 {
+; CHECK-LABEL: define dso_local noundef ptr @_Z3foov
+define dso_local noundef ptr @_Z3foov() #0 !dbg !39 {
 entry:
   ;; We should still have memprof/callsite metadata for the non-inlined calls
   ;; from main, but should have removed those from the inlined call in_Z4foo2v.
   ;; CHECK: call {{.*}} @_Znam{{.*}} !memprof ![[ORIGMEMPROF:[0-9]+]]
-  %call = call noalias noundef nonnull i8* @_Znam(i64 noundef 10) #7, !dbg !42, !memprof !43, !callsite !52
-  ret i8* %call, !dbg !53
+  %call = call noalias noundef nonnull ptr @_Znam(i64 noundef 10) #7, !dbg !42, !memprof !43, !callsite !52
+  ret ptr %call, !dbg !53
 }
 
 ; Function Attrs: nobuiltin allocsize(0)
-declare noundef nonnull i8* @_Znam(i64 noundef) #1
+declare noundef nonnull ptr @_Znam(i64 noundef) #1
 
 ;; Mark noinline so we don't inline into calls from bar and baz. We should end
 ;; up with a memprof metadata on the call to foo below.
 ; Function Attrs: mustprogress noinline uwtable
-; CHECK-LABEL: define dso_local noundef i8* @_Z4foo2v
-define dso_local noundef i8* @_Z4foo2v() #2 !dbg !54 {
+; CHECK-LABEL: define dso_local noundef ptr @_Z4foo2v
+define dso_local noundef ptr @_Z4foo2v() #2 !dbg !54 {
 entry:
   ;; We should have memprof metadata for the call stacks from bar and baz,
   ;; and the callsite metadata should be the concatentation of the id from the
   ;; inlined call to new and the original callsite.
   ; CHECK: call {{.*}} @_Znam{{.*}} !memprof ![[NEWMEMPROF:[0-9]+]], !callsite ![[NEWCALLSITE:[0-9]+]]
-  %call = call noundef i8* @_Z3foov(), !dbg !55, !callsite !56
-  ret i8* %call, !dbg !57
+  %call = call noundef ptr @_Z3foov(), !dbg !55, !callsite !56
+  ret ptr %call, !dbg !57
 }
 
 ; Function Attrs: mustprogress uwtable
-define dso_local noundef i8* @_Z3barv() #0 !dbg !58 {
+define dso_local noundef ptr @_Z3barv() #0 !dbg !58 {
 entry:
-  %call = call noundef i8* @_Z4foo2v(), !dbg !59, !callsite !60
-  ret i8* %call, !dbg !61
+  %call = call noundef ptr @_Z4foo2v(), !dbg !59, !callsite !60
+  ret ptr %call, !dbg !61
 }
 
 ; Function Attrs: mustprogress uwtable
-define dso_local noundef i8* @_Z3bazv() #0 !dbg !62 {
+define dso_local noundef ptr @_Z3bazv() #0 !dbg !62 {
 entry:
-  %call = call noundef i8* @_Z4foo2v(), !dbg !63, !callsite !64
-  ret i8* %call, !dbg !65
+  %call = call noundef ptr @_Z4foo2v(), !dbg !63, !callsite !64
+  ret ptr %call, !dbg !65
 }
 
 ;; Make sure we don't propagate any memprof/callsite metadata
 ; Function Attrs: mustprogress uwtable
-; CHECK-LABEL: define dso_local noundef i8* @notprofiled
-define dso_local noundef i8* @notprofiled() #0 !dbg !66 {
+; CHECK-LABEL: define dso_local noundef ptr @notprofiled
+define dso_local noundef ptr @notprofiled() #0 !dbg !66 {
 entry:
   ; CHECK: call {{.*}} @_Znam
   ; CHECK-NOT: !memprof
   ; CHECK-NOT: !callsite
-  %call = call noundef i8* @_Z3foov(), !dbg !67
+  %call = call noundef ptr @_Z3foov(), !dbg !67
   ; CHECK-NEXT: ret
-  ret i8* %call, !dbg !68
+  ret ptr %call, !dbg !68
 }
 
 ; Function Attrs: mustprogress noinline norecurse optnone uwtable
-define dso_local noundef i32 @main(i32 noundef %argc, i8** noundef %argv) #3 !dbg !69 {
+define dso_local noundef i32 @main(i32 noundef %argc, ptr noundef %argv) #3 !dbg !69 {
 entry:
   %retval = alloca i32, align 4
   %argc.addr = alloca i32, align 4
-  %argv.addr = alloca i8**, align 8
-  %c = alloca i8*, align 8
-  %d = alloca i8*, align 8
-  %e = alloca i8*, align 8
-  %f = alloca i8*, align 8
-  store i32 0, i32* %retval, align 4
-  store i32 %argc, i32* %argc.addr, align 4
-  store i8** %argv, i8*** %argv.addr, align 8
+  %argv.addr = alloca ptr, align 8
+  %c = alloca ptr, align 8
+  %d = alloca ptr, align 8
+  %e = alloca ptr, align 8
+  %f = alloca ptr, align 8
+  store i32 0, ptr %retval, align 4
+  store i32 %argc, ptr %argc.addr, align 4
+  store ptr %argv, ptr %argv.addr, align 8
   ;; The below 4 callsites are all annotated as noinline
-  %call = call noundef i8* @_Z3foov() #8, !dbg !70, !callsite !71
-  store i8* %call, i8** %c, align 8, !dbg !72
-  %call1 = call noundef i8* @_Z3foov() #8, !dbg !73, !callsite !74
-  store i8* %call1, i8** %d, align 8, !dbg !75
-  %call2 = call noundef i8* @_Z3barv() #8, !dbg !76, !callsite !77
-  store i8* %call2, i8** %e, align 8, !dbg !78
-  %call3 = call noundef i8* @_Z3bazv() #8, !dbg !79, !callsite !80
-  store i8* %call3, i8** %f, align 8, !dbg !81
-  %0 = load i8*, i8** %c, align 8, !dbg !82
-  call void @llvm.memset.p0i8.i64(i8* align 1 %0, i8 0, i64 10, i1 false), !dbg !83
-  %1 = load i8*, i8** %d, align 8, !dbg !84
-  call void @llvm.memset.p0i8.i64(i8* align 1 %1, i8 0, i64 10, i1 false), !dbg !85
-  %2 = load i8*, i8** %e, align 8, !dbg !86
-  call void @llvm.memset.p0i8.i64(i8* align 1 %2, i8 0, i64 10, i1 false), !dbg !87
-  %3 = load i8*, i8** %f, align 8, !dbg !88
-  call void @llvm.memset.p0i8.i64(i8* align 1 %3, i8 0, i64 10, i1 false), !dbg !89
-  %4 = load i8*, i8** %c, align 8, !dbg !90
-  %isnull = icmp eq i8* %4, null, !dbg !91
+  %call = call noundef ptr @_Z3foov() #8, !dbg !70, !callsite !71
+  store ptr %call, ptr %c, align 8, !dbg !72
+  %call1 = call noundef ptr @_Z3foov() #8, !dbg !73, !callsite !74
+  store ptr %call1, ptr %d, align 8, !dbg !75
+  %call2 = call noundef ptr @_Z3barv() #8, !dbg !76, !callsite !77
+  store ptr %call2, ptr %e, align 8, !dbg !78
+  %call3 = call noundef ptr @_Z3bazv() #8, !dbg !79, !callsite !80
+  store ptr %call3, ptr %f, align 8, !dbg !81
+  %0 = load ptr, ptr %c, align 8, !dbg !82
+  call void @llvm.memset.p0.i64(ptr align 1 %0, i8 0, i64 10, i1 false), !dbg !83
+  %1 = load ptr, ptr %d, align 8, !dbg !84
+  call void @llvm.memset.p0.i64(ptr align 1 %1, i8 0, i64 10, i1 false), !dbg !85
+  %2 = load ptr, ptr %e, align 8, !dbg !86
+  call void @llvm.memset.p0.i64(ptr align 1 %2, i8 0, i64 10, i1 false), !dbg !87
+  %3 = load ptr, ptr %f, align 8, !dbg !88
+  call void @llvm.memset.p0.i64(ptr align 1 %3, i8 0, i64 10, i1 false), !dbg !89
+  %4 = load ptr, ptr %c, align 8, !dbg !90
+  %isnull = icmp eq ptr %4, null, !dbg !91
   br i1 %isnull, label %delete.end, label %delete.notnull, !dbg !91
 
 delete.notnull:                                   ; preds = %entry
-  call void @_ZdaPv(i8* noundef %4) #9, !dbg !92
+  call void @_ZdaPv(ptr noundef %4) #9, !dbg !92
   br label %delete.end, !dbg !92
 
 delete.end:                                       ; preds = %delete.notnull, %entry
   %call4 = call i32 @sleep(i32 noundef 200), !dbg !94
-  %5 = load i8*, i8** %d, align 8, !dbg !95
-  %isnull5 = icmp eq i8* %5, null, !dbg !96
+  %5 = load ptr, ptr %d, align 8, !dbg !95
+  %isnull5 = icmp eq ptr %5, null, !dbg !96
   br i1 %isnull5, label %delete.end7, label %delete.notnull6, !dbg !96
 
 delete.notnull6:                                  ; preds = %delete.end
-  call void @_ZdaPv(i8* noundef %5) #9, !dbg !97
+  call void @_ZdaPv(ptr noundef %5) #9, !dbg !97
   br label %delete.end7, !dbg !97
 
 delete.end7:                                      ; preds = %delete.notnull6, %delete.end
-  %6 = load i8*, i8** %e, align 8, !dbg !98
-  %isnull8 = icmp eq i8* %6, null, !dbg !99
+  %6 = load ptr, ptr %e, align 8, !dbg !98
+  %isnull8 = icmp eq ptr %6, null, !dbg !99
   br i1 %isnull8, label %delete.end10, label %delete.notnull9, !dbg !99
 
 delete.notnull9:                                  ; preds = %delete.end7
-  call void @_ZdaPv(i8* noundef %6) #9, !dbg !100
+  call void @_ZdaPv(ptr noundef %6) #9, !dbg !100
   br label %delete.end10, !dbg !100
 
 delete.end10:                                     ; preds = %delete.notnull9, %delete.end7
-  %7 = load i8*, i8** %f, align 8, !dbg !101
-  %isnull11 = icmp eq i8* %7, null, !dbg !102
+  %7 = load ptr, ptr %f, align 8, !dbg !101
+  %isnull11 = icmp eq ptr %7, null, !dbg !102
   br i1 %isnull11, label %delete.end13, label %delete.notnull12, !dbg !102
 
 delete.notnull12:                                 ; preds = %delete.end10
-  call void @_ZdaPv(i8* noundef %7) #9, !dbg !103
+  call void @_ZdaPv(ptr noundef %7) #9, !dbg !103
   br label %delete.end13, !dbg !103
 
 delete.end13:                                     ; preds = %delete.notnull12, %delete.end10
@@ -170,10 +170,10 @@ delete.end13:                                     ; preds = %delete.notnull12, %
 }
 
 ; Function Attrs: argmemonly nofree nounwind willreturn writeonly
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #4
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #4
 
 ; Function Attrs: nobuiltin nounwind
-declare void @_ZdaPv(i8* noundef) #5
+declare void @_ZdaPv(ptr noundef) #5
 
 declare i32 @sleep(i32 noundef) #6
 

diff  --git a/llvm/test/Transforms/Inline/module-inlining.ll b/llvm/test/Transforms/Inline/module-inlining.ll
index 00bfabd4982d3..f22c0f580b6e4 100644
--- a/llvm/test/Transforms/Inline/module-inlining.ll
+++ b/llvm/test/Transforms/Inline/module-inlining.ll
@@ -9,17 +9,17 @@
 ; RUN: opt -passes=inliner-wrapper -S < %s | FileCheck %s --check-prefix=INLINE --check-prefix=CHECK
 ; RUN: opt -passes=scc-oz-module-inliner -S < %s | FileCheck %s --check-prefix=MODULE --check-prefix=CHECK
 
-define void @modify_value({i32, float}* %v) {
-    %f = getelementptr { i32, float }, { i32, float }* %v, i64 0, i32 0
-    store i32 10, i32* %f
+define void @modify_value(ptr %v) {
+    %f = getelementptr { i32, float }, ptr %v, i64 0, i32 0
+    store i32 10, ptr %f
     ret void
 }
 
 define i32 @main() {
     %my_val = alloca {i32, float}
-    call void @modify_value({i32, float}* %my_val)
-    %f = getelementptr { i32, float }, { i32, float }* %my_val, i64 0, i32 0
-    %ret = load i32, i32* %f
+    call void @modify_value(ptr %my_val)
+    %f = getelementptr { i32, float }, ptr %my_val, i64 0, i32 0
+    %ret = load i32, ptr %f
     ret i32 %ret
 }
 

diff  --git a/llvm/test/Transforms/Inline/monster_scc.ll b/llvm/test/Transforms/Inline/monster_scc.ll
index 0be9cbb8085d7..d2bdd70a93923 100644
--- a/llvm/test/Transforms/Inline/monster_scc.ll
+++ b/llvm/test/Transforms/Inline/monster_scc.ll
@@ -58,23 +58,23 @@ declare void @_Z1gi(i32)
 ; NEW-NOT: call
 ; NEW: call void @_Z1fILb0ELi2EEvPbS0_(
 ; NEW-NOT: call
-define void @_Z1fILb0ELi0EEvPbS0_(i8* %B, i8* %E) {
+define void @_Z1fILb0ELi0EEvPbS0_(ptr %B, ptr %E) {
 entry:
-  %cmp = icmp eq i8* %B, %E
+  %cmp = icmp eq ptr %B, %E
   br i1 %cmp, label %if.end3, label %if.end
 
 if.end:
-  %0 = load i8, i8* %B, align 1
+  %0 = load i8, ptr %B, align 1
   %tobool = icmp eq i8 %0, 0
-  %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+  %add.ptr2 = getelementptr inbounds i8, ptr %B, i64 1
   br i1 %tobool, label %if.else, label %if.then1
 
 if.then1:
-  call void @_Z1fILb1ELi1EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb1ELi1EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.else:
-  call void @_Z1fILb0ELi1EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb0ELi1EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.end3:
@@ -91,24 +91,24 @@ if.end3:
 ; NEW-NOT: call
 ; NEW: call void @_Z1fILb0ELi2EEvPbS0_(
 ; NEW-NOT: call
-define void @_Z1fILb1ELi0EEvPbS0_(i8* %B, i8* %E) {
+define void @_Z1fILb1ELi0EEvPbS0_(ptr %B, ptr %E) {
 entry:
   call void @_Z1gi(i32 0)
-  %cmp = icmp eq i8* %B, %E
+  %cmp = icmp eq ptr %B, %E
   br i1 %cmp, label %if.end3, label %if.end
 
 if.end:
-  %0 = load i8, i8* %B, align 1
+  %0 = load i8, ptr %B, align 1
   %tobool = icmp eq i8 %0, 0
-  %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+  %add.ptr2 = getelementptr inbounds i8, ptr %B, i64 1
   br i1 %tobool, label %if.else, label %if.then1
 
 if.then1:
-  call void @_Z1fILb1ELi1EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb1ELi1EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.else:
-  call void @_Z1fILb0ELi1EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb0ELi1EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.end3:
@@ -123,23 +123,23 @@ if.end3:
 ; NEW-NOT: call
 ; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
 ; NEW-NOT: call
-define void @_Z1fILb0ELi1EEvPbS0_(i8* %B, i8* %E) {
+define void @_Z1fILb0ELi1EEvPbS0_(ptr %B, ptr %E) {
 entry:
-  %cmp = icmp eq i8* %B, %E
+  %cmp = icmp eq ptr %B, %E
   br i1 %cmp, label %if.end3, label %if.end
 
 if.end:
-  %0 = load i8, i8* %B, align 1
+  %0 = load i8, ptr %B, align 1
   %tobool = icmp eq i8 %0, 0
-  %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+  %add.ptr2 = getelementptr inbounds i8, ptr %B, i64 1
   br i1 %tobool, label %if.else, label %if.then1
 
 if.then1:
-  call void @_Z1fILb1ELi2EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb1ELi2EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.else:
-  call void @_Z1fILb0ELi2EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb0ELi2EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.end3:
@@ -160,25 +160,25 @@ if.end3:
 ; NEW-NOT: call
 ; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
 ; NEW-NOT: call
-define void @_Z1fILb1ELi1EEvPbS0_(i8* %B, i8* %E) {
+define void @_Z1fILb1ELi1EEvPbS0_(ptr %B, ptr %E) {
 entry:
   call void @_Z1gi(i32 1)
-  %cmp = icmp eq i8* %B, %E
+  %cmp = icmp eq ptr %B, %E
 ; CHECK-NOT: call
   br i1 %cmp, label %if.end3, label %if.end
 
 if.end:
-  %0 = load i8, i8* %B, align 1
+  %0 = load i8, ptr %B, align 1
   %tobool = icmp eq i8 %0, 0
-  %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+  %add.ptr2 = getelementptr inbounds i8, ptr %B, i64 1
   br i1 %tobool, label %if.else, label %if.then1
 
 if.then1:
-  call void @_Z1fILb1ELi2EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb1ELi2EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.else:
-  call void @_Z1fILb0ELi2EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb0ELi2EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.end3:
@@ -197,23 +197,23 @@ if.end3:
 ; NEW-NOT: call
 ; NEW: call void @_Z1fILb0ELi4EEvPbS0_(
 ; NEW-NOT: call
-define void @_Z1fILb0ELi2EEvPbS0_(i8* %B, i8* %E) {
+define void @_Z1fILb0ELi2EEvPbS0_(ptr %B, ptr %E) {
 entry:
-  %cmp = icmp eq i8* %B, %E
+  %cmp = icmp eq ptr %B, %E
   br i1 %cmp, label %if.end3, label %if.end
 
 if.end:
-  %0 = load i8, i8* %B, align 1
+  %0 = load i8, ptr %B, align 1
   %tobool = icmp eq i8 %0, 0
-  %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+  %add.ptr2 = getelementptr inbounds i8, ptr %B, i64 1
   br i1 %tobool, label %if.else, label %if.then1
 
 if.then1:
-  call void @_Z1fILb1ELi3EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb1ELi3EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.else:
-  call void @_Z1fILb0ELi3EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb0ELi3EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.end3:
@@ -234,24 +234,24 @@ if.end3:
 ; NEW-NOT: call
 ; NEW: call void @_Z1fILb0ELi4EEvPbS0_(
 ; NEW-NOT: call
-define void @_Z1fILb1ELi2EEvPbS0_(i8* %B, i8* %E) {
+define void @_Z1fILb1ELi2EEvPbS0_(ptr %B, ptr %E) {
 entry:
   call void @_Z1gi(i32 2)
-  %cmp = icmp eq i8* %B, %E
+  %cmp = icmp eq ptr %B, %E
   br i1 %cmp, label %if.end3, label %if.end
 
 if.end:
-  %0 = load i8, i8* %B, align 1
+  %0 = load i8, ptr %B, align 1
   %tobool = icmp eq i8 %0, 0
-  %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+  %add.ptr2 = getelementptr inbounds i8, ptr %B, i64 1
   br i1 %tobool, label %if.else, label %if.then1
 
 if.then1:
-  call void @_Z1fILb1ELi3EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb1ELi3EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.else:
-  call void @_Z1fILb0ELi3EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb0ELi3EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.end3:
@@ -268,23 +268,23 @@ if.end3:
 ; NEW-NOT: call
 ; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
 ; NEW-NOT: call
-define void @_Z1fILb0ELi3EEvPbS0_(i8* %B, i8* %E) {
+define void @_Z1fILb0ELi3EEvPbS0_(ptr %B, ptr %E) {
 entry:
-  %cmp = icmp eq i8* %B, %E
+  %cmp = icmp eq ptr %B, %E
   br i1 %cmp, label %if.end3, label %if.end
 
 if.end:
-  %0 = load i8, i8* %B, align 1
+  %0 = load i8, ptr %B, align 1
   %tobool = icmp eq i8 %0, 0
-  %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+  %add.ptr2 = getelementptr inbounds i8, ptr %B, i64 1
   br i1 %tobool, label %if.else, label %if.then1
 
 if.then1:
-  call void @_Z1fILb1ELi4EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb1ELi4EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.else:
-  call void @_Z1fILb0ELi4EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb0ELi4EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.end3:
@@ -299,24 +299,24 @@ if.end3:
 ; CHECK-NOT: call
 ; CHECK: call void @_Z1fILb0ELi0EEvPbS0_(
 ; CHECK-NOT: call
-define void @_Z1fILb1ELi3EEvPbS0_(i8* %B, i8* %E) {
+define void @_Z1fILb1ELi3EEvPbS0_(ptr %B, ptr %E) {
 entry:
   call void @_Z1gi(i32 3)
-  %cmp = icmp eq i8* %B, %E
+  %cmp = icmp eq ptr %B, %E
   br i1 %cmp, label %if.end3, label %if.end
 
 if.end:
-  %0 = load i8, i8* %B, align 1
+  %0 = load i8, ptr %B, align 1
   %tobool = icmp eq i8 %0, 0
-  %add.ptr2 = getelementptr inbounds i8, i8* %B, i64 1
+  %add.ptr2 = getelementptr inbounds i8, ptr %B, i64 1
   br i1 %tobool, label %if.else, label %if.then1
 
 if.then1:
-  call void @_Z1fILb1ELi4EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb1ELi4EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.else:
-  call void @_Z1fILb0ELi4EEvPbS0_(i8* %add.ptr2, i8* %E)
+  call void @_Z1fILb0ELi4EEvPbS0_(ptr %add.ptr2, ptr %E)
   br label %if.end3
 
 if.end3:
@@ -327,9 +327,9 @@ if.end3:
 ; CHECK-NOT: call
 ; CHECK: call void @_Z1fILb0ELi0EEvPbS0_(
 ; CHECK-NOT: call
-define void @_Z1fILb0ELi4EEvPbS0_(i8* %B, i8* %E) {
+define void @_Z1fILb0ELi4EEvPbS0_(ptr %B, ptr %E) {
 entry:
-  call void @_Z1fILb0ELi0EEvPbS0_(i8* %B, i8* %E)
+  call void @_Z1fILb0ELi0EEvPbS0_(ptr %B, ptr %E)
   ret void
 }
 
@@ -341,18 +341,18 @@ entry:
 ; NEW-NOT: call
 ; NEW: call void @_Z1fILb0ELi1EEvPbS0_(
 ; NEW-NOT: call
-define void @_Z1fILb1ELi4EEvPbS0_(i8* %B, i8* %E) {
+define void @_Z1fILb1ELi4EEvPbS0_(ptr %B, ptr %E) {
 entry:
-  call void @_Z1fILb1ELi0EEvPbS0_(i8* %B, i8* %E)
+  call void @_Z1fILb1ELi0EEvPbS0_(ptr %B, ptr %E)
   ret void
 }
 
 ; CHECK-LABEL: define void @_Z4testPbS_(
 ; CHECK: call
 ; CHECK-NOT: call
-define void @_Z4testPbS_(i8* %B, i8* %E) {
+define void @_Z4testPbS_(ptr %B, ptr %E) {
 entry:
-  call void @_Z1fILb0ELi0EEvPbS0_(i8* %B, i8* %E)
+  call void @_Z1fILb0ELi0EEvPbS0_(ptr %B, ptr %E)
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/nested-inline.ll b/llvm/test/Transforms/Inline/nested-inline.ll
index fb8a7bc9e27d6..703a8cbcb9724 100644
--- a/llvm/test/Transforms/Inline/nested-inline.ll
+++ b/llvm/test/Transforms/Inline/nested-inline.ll
@@ -2,20 +2,20 @@
 ; RUN: opt < %s -passes='cgscc(inline)' -S | FileCheck %s
 ; RUN: opt < %s -passes='module-inline' -S | FileCheck %s
 ; Test that bar and bar2 are both inlined throughout and removed.
- at A = weak global i32 0		; <i32*> [#uses=1]
- at B = weak global i32 0		; <i32*> [#uses=1]
- at C = weak global i32 0		; <i32*> [#uses=1]
+ at A = weak global i32 0		; <ptr> [#uses=1]
+ at B = weak global i32 0		; <ptr> [#uses=1]
+ at C = weak global i32 0		; <ptr> [#uses=1]
 
 define fastcc void @foo(i32 %X) {
 entry:
 ; CHECK-LABEL: @foo(
-	%ALL = alloca i32, align 4		; <i32*> [#uses=1]
+	%ALL = alloca i32, align 4		; <ptr> [#uses=1]
 	%tmp1 = and i32 %X, 1		; <i32> [#uses=1]
 	%tmp1.upgrd.1 = icmp eq i32 %tmp1, 0		; <i1> [#uses=1]
 	br i1 %tmp1.upgrd.1, label %cond_next, label %cond_true
 
 cond_true:		; preds = %entry
-	store i32 1, i32* @A
+	store i32 1, ptr @A
 	br label %cond_next
 
 cond_next:		; preds = %cond_true, %entry
@@ -24,7 +24,7 @@ cond_next:		; preds = %cond_true, %entry
 	br i1 %tmp4.upgrd.2, label %cond_next7, label %cond_true5
 
 cond_true5:		; preds = %cond_next
-	store i32 1, i32* @B
+	store i32 1, ptr @B
 	br label %cond_next7
 
 cond_next7:		; preds = %cond_true5, %cond_next
@@ -33,7 +33,7 @@ cond_next7:		; preds = %cond_true5, %cond_next
 	br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
 
 cond_true11:		; preds = %cond_next7
-	store i32 1, i32* @C
+	store i32 1, ptr @C
 	br label %cond_next13
 
 cond_next13:		; preds = %cond_true11, %cond_next7
@@ -42,7 +42,7 @@ cond_next13:		; preds = %cond_true11, %cond_next7
 	br i1 %tmp16.upgrd.4, label %UnifiedReturnBlock, label %cond_true17
 
 cond_true17:		; preds = %cond_next13
-	call void @ext( i32* %ALL )
+	call void @ext( ptr %ALL )
 	ret void
 
 UnifiedReturnBlock:		; preds = %cond_next13
@@ -52,13 +52,13 @@ UnifiedReturnBlock:		; preds = %cond_next13
 ; CHECK-NOT: @bar(
 define internal fastcc void @bar(i32 %X) {
 entry:
-	%ALL = alloca i32, align 4		; <i32*> [#uses=1]
+	%ALL = alloca i32, align 4		; <ptr> [#uses=1]
 	%tmp1 = and i32 %X, 1		; <i32> [#uses=1]
 	%tmp1.upgrd.1 = icmp eq i32 %tmp1, 0		; <i1> [#uses=1]
 	br i1 %tmp1.upgrd.1, label %cond_next, label %cond_true
 
 cond_true:		; preds = %entry
-	store i32 1, i32* @A
+	store i32 1, ptr @A
 	br label %cond_next
 
 cond_next:		; preds = %cond_true, %entry
@@ -67,7 +67,7 @@ cond_next:		; preds = %cond_true, %entry
 	br i1 %tmp4.upgrd.2, label %cond_next7, label %cond_true5
 
 cond_true5:		; preds = %cond_next
-	store i32 1, i32* @B
+	store i32 1, ptr @B
 	br label %cond_next7
 
 cond_next7:		; preds = %cond_true5, %cond_next
@@ -76,7 +76,7 @@ cond_next7:		; preds = %cond_true5, %cond_next
 	br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
 
 cond_true11:		; preds = %cond_next7
-	store i32 1, i32* @C
+	store i32 1, ptr @C
 	br label %cond_next13
 
 cond_next13:		; preds = %cond_true11, %cond_next7
@@ -98,7 +98,7 @@ entry:
 	ret void
 }
 
-declare void @ext(i32*)
+declare void @ext(ptr)
 
 define void @test(i32 %X) {
 entry:

diff  --git a/llvm/test/Transforms/Inline/no-inline-line-tables.ll b/llvm/test/Transforms/Inline/no-inline-line-tables.ll
index f0acde1363b25..e298088d1fce5 100644
--- a/llvm/test/Transforms/Inline/no-inline-line-tables.ll
+++ b/llvm/test/Transforms/Inline/no-inline-line-tables.ll
@@ -12,9 +12,9 @@ target triple = "x86_64-unknown-windows-msvc"
 define dso_local i32 @f(i32 %x) #0 !dbg !7 {
 entry:
   %x.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !12, metadata !DIExpression()), !dbg !13
-  %0 = load i32, i32* %x.addr, align 4, !dbg !14
+  store i32 %x, ptr %x.addr, align 4
+  call void @llvm.dbg.declare(metadata ptr %x.addr, metadata !12, metadata !DIExpression()), !dbg !13
+  %0 = load i32, ptr %x.addr, align 4, !dbg !14
   ret i32 %0, !dbg !14
 }
 
@@ -25,14 +25,14 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
 define i32 @g(i32 %x) #0 !dbg !15 {
 entry:
   %x.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !16, metadata !DIExpression()), !dbg !17
+  store i32 %x, ptr %x.addr, align 4
+  call void @llvm.dbg.declare(metadata ptr %x.addr, metadata !16, metadata !DIExpression()), !dbg !17
   br label %L, !dbg !17
 
 L:                                                ; preds = %entry
   call void @llvm.dbg.label(metadata !18), !dbg !19
-  store i32 42, i32* %x.addr, align 4, !dbg !20
-  %0 = load i32, i32* %x.addr, align 4, !dbg !21
+  store i32 42, ptr %x.addr, align 4, !dbg !20
+  %0 = load i32, ptr %x.addr, align 4, !dbg !21
   ret i32 %0, !dbg !21
 }
 
@@ -47,14 +47,14 @@ entry:
 ; CHECK-LABEL: @main()
 ; CHECK-NOT: @f
 ; CHECK-NOT: @llvm.dbg.declare
-; CHECK: %{{[0-9]+}} = load i32, i32* %x.addr.i, align 4, !dbg ![[VAR1:[0-9]+]]
+; CHECK: %{{[0-9]+}} = load i32, ptr %x.addr.i, align 4, !dbg ![[VAR1:[0-9]+]]
   %call = call i32 @f(i32 3), !dbg !25
 
 ; Another test for inlining debug intrinsics where the intrinsic appears at the
 ; start of the basic block.
 ; CHECK-NOT: @g
 ; CHECK-NOT: @llvm.dbg.label
-; CHECK: %{{[0-9]+}} = load i32, i32* %x.addr.i1, align 4, !dbg ![[VAR2:[0-9]+]]
+; CHECK: %{{[0-9]+}} = load i32, ptr %x.addr.i1, align 4, !dbg ![[VAR2:[0-9]+]]
   %call1 = call i32 @g(i32 340), !dbg !26
   ret i32 0, !dbg !27
 }

diff  --git a/llvm/test/Transforms/Inline/no-unwind-inline-asm.ll b/llvm/test/Transforms/Inline/no-unwind-inline-asm.ll
index f1a80bdf4ff08..bfe51e81575f2 100644
--- a/llvm/test/Transforms/Inline/no-unwind-inline-asm.ll
+++ b/llvm/test/Transforms/Inline/no-unwind-inline-asm.ll
@@ -10,14 +10,14 @@ entry:
   unreachable
 }
 
-define dso_local void @proxy() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local void @proxy() personality ptr @__gxx_personality_v0 {
 entry:
   call void asm sideeffect "call trap", "~{dirflag},~{fpsr},~{flags}"()
   call void asm sideeffect "call trap", "~{dirflag},~{fpsr},~{flags}"()
   ret void
 }
 
-define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local void @test() personality ptr @__gxx_personality_v0 {
 entry:
 ; CHECK: define dso_local void @test
 ; CHECK-NOT: invoke void @proxy()
@@ -31,16 +31,16 @@ invoke.cont:
   ret void
 
 lpad:
-; CHECK: %0 = landingpad { i8*, i32 }
-; CHECK: resume { i8*, i32 } %0
+; CHECK: %0 = landingpad { ptr, i32 }
+; CHECK: resume { ptr, i32 } %0
 
-  %0 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
-  call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0))
-  resume { i8*, i32 } %0
+  call void (ptr, ...) @printf(ptr @.str.2)
+  resume { ptr, i32 } %0
 
 }
 
 declare dso_local i32 @__gxx_personality_v0(...)
 
-declare dso_local void @printf(i8*, ...)
+declare dso_local void @printf(ptr, ...)

diff  --git a/llvm/test/Transforms/Inline/noalias-calls-always.ll b/llvm/test/Transforms/Inline/noalias-calls-always.ll
index a26e7c0d76d93..9c851b9327839 100644
--- a/llvm/test/Transforms/Inline/noalias-calls-always.ll
+++ b/llvm/test/Transforms/Inline/noalias-calls-always.ll
@@ -3,88 +3,88 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #0
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #0
 declare void @hey() #0
 
-define void @hello(i8* noalias nocapture %a, i8* noalias nocapture readonly %c, i8* nocapture %b) #1 {
+define void @hello(ptr noalias nocapture %a, ptr noalias nocapture readonly %c, ptr nocapture %b) #1 {
 ; CHECK-LABEL: @hello(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[L:%.*]] = alloca i8, i32 512, align 1
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A:%.*]], i8* align 16 [[B:%.*]], i64 16, i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[B]], i8* align 16 [[C:%.*]], i64 16, i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A]], i8* align 16 [[C]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A:%.*]], ptr align 16 [[B:%.*]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr align 16 [[C:%.*]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[C]], i64 16, i1 false)
 ; CHECK-NEXT:    call void @hey()
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[L]], i8* align 16 [[C]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L]], ptr align 16 [[C]], i64 16, i1 false)
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %l = alloca i8, i32 512, align 1
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %a, i8* align 16 %b, i64 16, i1 0)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %b, i8* align 16 %c, i64 16, i1 0)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %a, i8* align 16 %c, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %a, ptr align 16 %b, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %b, ptr align 16 %c, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %a, ptr align 16 %c, i64 16, i1 0)
   call void @hey()
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %l, i8* align 16 %c, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %l, ptr align 16 %c, i64 16, i1 0)
   ret void
 }
 
-define void @foo(i8* nocapture %a, i8* nocapture readonly %c, i8* nocapture %b) #2 {
+define void @foo(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %b) #2 {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[L_I:%.*]] = alloca i8, i32 512, align 1
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]])
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 512, i8* [[L_I]])
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A:%.*]], i8* align 16 [[B:%.*]], i64 16, i1 false), !noalias !3
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[B]], i8* align 16 [[C:%.*]], i64 16, i1 false), !noalias !0
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A]], i8* align 16 [[C]], i64 16, i1 false), !alias.scope !5
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A:%.*]], ptr align 16 [[B:%.*]], i64 16, i1 false), !noalias !3
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr align 16 [[C:%.*]], i64 16, i1 false), !noalias !0
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[C]], i64 16, i1 false), !alias.scope !5
 ; CHECK-NEXT:    call void @hey(), !noalias !5
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[L_I]], i8* align 16 [[C]], i64 16, i1 false), !noalias !0
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 512, i8* [[L_I]])
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr align 16 [[C]], i64 16, i1 false), !noalias !0
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  tail call void @hello(i8* %a, i8* %c, i8* %b)
+  tail call void @hello(ptr %a, ptr %c, ptr %b)
   ret void
 }
 
-define void @hello_cs(i8* nocapture %a, i8* nocapture readonly %c, i8* nocapture %b) #1 {
+define void @hello_cs(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %b) #1 {
 ; CHECK-LABEL: @hello_cs(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[L:%.*]] = alloca i8, i32 512, align 1
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A:%.*]], i8* align 16 [[B:%.*]], i64 16, i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[B]], i8* align 16 [[C:%.*]], i64 16, i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A]], i8* align 16 [[C]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A:%.*]], ptr align 16 [[B:%.*]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr align 16 [[C:%.*]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[C]], i64 16, i1 false)
 ; CHECK-NEXT:    call void @hey()
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[L]], i8* align 16 [[C]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L]], ptr align 16 [[C]], i64 16, i1 false)
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %l = alloca i8, i32 512, align 1
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %a, i8* align 16 %b, i64 16, i1 0)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %b, i8* align 16 %c, i64 16, i1 0)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %a, i8* align 16 %c, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %a, ptr align 16 %b, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %b, ptr align 16 %c, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %a, ptr align 16 %c, i64 16, i1 0)
   call void @hey()
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %l, i8* align 16 %c, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %l, ptr align 16 %c, i64 16, i1 0)
   ret void
 }
 
-define void @foo_cs(i8* nocapture %a, i8* nocapture readonly %c, i8* nocapture %b) #2 {
+define void @foo_cs(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %b) #2 {
 ; CHECK-LABEL: @foo_cs(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[L_I:%.*]] = alloca i8, i32 512, align 1
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]])
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 512, i8* [[L_I]])
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A:%.*]], i8* align 16 [[B:%.*]], i64 16, i1 false), !noalias !9
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[B]], i8* align 16 [[C:%.*]], i64 16, i1 false), !noalias !6
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A]], i8* align 16 [[C]], i64 16, i1 false), !alias.scope !11
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A:%.*]], ptr align 16 [[B:%.*]], i64 16, i1 false), !noalias !9
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr align 16 [[C:%.*]], i64 16, i1 false), !noalias !6
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[C]], i64 16, i1 false), !alias.scope !11
 ; CHECK-NEXT:    call void @hey(), !noalias !11
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[L_I]], i8* align 16 [[C]], i64 16, i1 false), !noalias !6
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 512, i8* [[L_I]])
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr align 16 [[C]], i64 16, i1 false), !noalias !6
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  tail call void @hello_cs(i8* noalias %a, i8* noalias %c, i8* %b)
+  tail call void @hello_cs(ptr noalias %a, ptr noalias %c, ptr %b)
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/noalias-calls.ll b/llvm/test/Transforms/Inline/noalias-calls.ll
index e6fc770e70a11..e3791da54b232 100644
--- a/llvm/test/Transforms/Inline/noalias-calls.ll
+++ b/llvm/test/Transforms/Inline/noalias-calls.ll
@@ -4,92 +4,92 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #0
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #0
 declare void @hey() #0
 
-define void @hello(i8* noalias nocapture %a, i8* noalias nocapture readonly %c, i8* nocapture %b) #1 {
+define void @hello(ptr noalias nocapture %a, ptr noalias nocapture readonly %c, ptr nocapture %b) #1 {
 ; CHECK-LABEL: define {{[^@]+}}@hello
-; CHECK-SAME: (i8* noalias nocapture [[A:%.*]], i8* noalias nocapture readonly [[C:%.*]], i8* nocapture [[B:%.*]]) #[[ATTR2:[0-9]+]] {
+; CHECK-SAME: (ptr noalias nocapture [[A:%.*]], ptr noalias nocapture readonly [[C:%.*]], ptr nocapture [[B:%.*]]) #[[ATTR2:[0-9]+]] {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[L:%.*]] = alloca i8, i32 512, align 1
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A]], i8* align 16 [[B]], i64 16, i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[B]], i8* align 16 [[C]], i64 16, i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A]], i8* align 16 [[C]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[B]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr align 16 [[C]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[C]], i64 16, i1 false)
 ; CHECK-NEXT:    call void @hey()
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[L]], i8* align 16 [[C]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L]], ptr align 16 [[C]], i64 16, i1 false)
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %l = alloca i8, i32 512, align 1
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %a, i8* align 16 %b, i64 16, i1 0)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %b, i8* align 16 %c, i64 16, i1 0)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %a, i8* align 16 %c, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %a, ptr align 16 %b, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %b, ptr align 16 %c, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %a, ptr align 16 %c, i64 16, i1 0)
   call void @hey()
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %l, i8* align 16 %c, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %l, ptr align 16 %c, i64 16, i1 0)
   ret void
 }
 
-define void @foo(i8* nocapture %a, i8* nocapture readonly %c, i8* nocapture %b) #2 {
+define void @foo(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %b) #2 {
 ; CHECK-LABEL: define {{[^@]+}}@foo
-; CHECK-SAME: (i8* nocapture [[A:%.*]], i8* nocapture readonly [[C:%.*]], i8* nocapture [[B:%.*]]) #[[ATTR3:[0-9]+]] {
+; CHECK-SAME: (ptr nocapture [[A:%.*]], ptr nocapture readonly [[C:%.*]], ptr nocapture [[B:%.*]]) #[[ATTR3:[0-9]+]] {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[L_I:%.*]] = alloca i8, i32 512, align 1
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]])
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 512, i8* [[L_I]])
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A]], i8* align 16 [[B]], i64 16, i1 false), !noalias !3
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[B]], i8* align 16 [[C]], i64 16, i1 false), !noalias !0
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A]], i8* align 16 [[C]], i64 16, i1 false), !alias.scope !5
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[B]], i64 16, i1 false), !noalias !3
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr align 16 [[C]], i64 16, i1 false), !noalias !0
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[C]], i64 16, i1 false), !alias.scope !5
 ; CHECK-NEXT:    call void @hey(), !noalias !5
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[L_I]], i8* align 16 [[C]], i64 16, i1 false), !noalias !0
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 512, i8* [[L_I]])
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr align 16 [[C]], i64 16, i1 false), !noalias !0
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  tail call void @hello(i8* %a, i8* %c, i8* %b)
+  tail call void @hello(ptr %a, ptr %c, ptr %b)
   ret void
 }
 
-define void @hello_cs(i8* nocapture %a, i8* nocapture readonly %c, i8* nocapture %b) #1 {
+define void @hello_cs(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %b) #1 {
 ; CHECK-LABEL: define {{[^@]+}}@hello_cs
-; CHECK-SAME: (i8* nocapture [[A:%.*]], i8* nocapture readonly [[C:%.*]], i8* nocapture [[B:%.*]]) #[[ATTR2]] {
+; CHECK-SAME: (ptr nocapture [[A:%.*]], ptr nocapture readonly [[C:%.*]], ptr nocapture [[B:%.*]]) #[[ATTR2]] {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[L:%.*]] = alloca i8, i32 512, align 1
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A]], i8* align 16 [[B]], i64 16, i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[B]], i8* align 16 [[C]], i64 16, i1 false)
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A]], i8* align 16 [[C]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[B]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr align 16 [[C]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[C]], i64 16, i1 false)
 ; CHECK-NEXT:    call void @hey()
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[L]], i8* align 16 [[C]], i64 16, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L]], ptr align 16 [[C]], i64 16, i1 false)
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %l = alloca i8, i32 512, align 1
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %a, i8* align 16 %b, i64 16, i1 0)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %b, i8* align 16 %c, i64 16, i1 0)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %a, i8* align 16 %c, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %a, ptr align 16 %b, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %b, ptr align 16 %c, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %a, ptr align 16 %c, i64 16, i1 0)
   call void @hey()
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %l, i8* align 16 %c, i64 16, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %l, ptr align 16 %c, i64 16, i1 0)
   ret void
 }
 
-define void @foo_cs(i8* nocapture %a, i8* nocapture readonly %c, i8* nocapture %b) #2 {
+define void @foo_cs(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %b) #2 {
 ; CHECK-LABEL: define {{[^@]+}}@foo_cs
-; CHECK-SAME: (i8* nocapture [[A:%.*]], i8* nocapture readonly [[C:%.*]], i8* nocapture [[B:%.*]]) #[[ATTR3]] {
+; CHECK-SAME: (ptr nocapture [[A:%.*]], ptr nocapture readonly [[C:%.*]], ptr nocapture [[B:%.*]]) #[[ATTR3]] {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[L_I:%.*]] = alloca i8, i32 512, align 1
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]])
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 512, i8* [[L_I]])
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A]], i8* align 16 [[B]], i64 16, i1 false), !noalias !9
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[B]], i8* align 16 [[C]], i64 16, i1 false), !noalias !6
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[A]], i8* align 16 [[C]], i64 16, i1 false), !alias.scope !11
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[B]], i64 16, i1 false), !noalias !9
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr align 16 [[C]], i64 16, i1 false), !noalias !6
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[C]], i64 16, i1 false), !alias.scope !11
 ; CHECK-NEXT:    call void @hey(), !noalias !11
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[L_I]], i8* align 16 [[C]], i64 16, i1 false), !noalias !6
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 512, i8* [[L_I]])
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr align 16 [[C]], i64 16, i1 false), !noalias !6
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]])
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  tail call void @hello_cs(i8* noalias %a, i8* noalias %c, i8* %b)
+  tail call void @hello_cs(ptr noalias %a, ptr noalias %c, ptr %b)
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/noalias-calls2.ll b/llvm/test/Transforms/Inline/noalias-calls2.ll
index 6148e50686b3d..bbfc03bf2bef7 100644
--- a/llvm/test/Transforms/Inline/noalias-calls2.ll
+++ b/llvm/test/Transforms/Inline/noalias-calls2.ll
@@ -7,105 +7,105 @@ target triple = "x86_64-unknown-linux-gnu"
 
 declare void @llvm.experimental.noalias.scope.decl(metadata) #0
 
-define void @caller_equals_callee(i32* noalias %p0, i32* noalias %p1, i32 %cnt) {
+define void @caller_equals_callee(ptr noalias %p0, ptr noalias %p1, i32 %cnt) {
 ; CHECK-LABEL: define {{[^@]+}}@caller_equals_callee
-; CHECK-SAME: (i32* noalias [[P0:%.*]], i32* noalias [[P1:%.*]], i32 [[CNT:%.*]]) {
+; CHECK-SAME: (ptr noalias [[P0:%.*]], ptr noalias [[P1:%.*]], i32 [[CNT:%.*]]) {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[P0]], i64 2
-; CHECK-NEXT:    [[ADD_PTR1:%.*]] = getelementptr inbounds i32, i32* [[P1]], i64 2
+; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[P0]], i64 2
+; CHECK-NEXT:    [[ADD_PTR1:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 2
 ; CHECK-NEXT:    tail call void @llvm.experimental.noalias.scope.decl(metadata !0)
 ; CHECK-NEXT:    tail call void @llvm.experimental.noalias.scope.decl(metadata !3)
-; CHECK-NEXT:    store i32 10, i32* [[ADD_PTR]], align 4, !alias.scope !0, !noalias !3
-; CHECK-NEXT:    store i32 20, i32* [[ADD_PTR1]], align 4, !alias.scope !3, !noalias !0
+; CHECK-NEXT:    store i32 10, ptr [[ADD_PTR]], align 4, !alias.scope !0, !noalias !3
+; CHECK-NEXT:    store i32 20, ptr [[ADD_PTR1]], align 4, !alias.scope !3, !noalias !0
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CNT]], 0
 ; CHECK-NEXT:    br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
 ; CHECK:       if.then:
-; CHECK-NEXT:    store i32 11, i32* [[P0]], align 4
+; CHECK-NEXT:    store i32 11, ptr [[P0]], align 4
 ; CHECK-NEXT:    br label [[IF_END:%.*]]
 ; CHECK:       if.else:
-; CHECK-NEXT:    [[ADD_PTR2:%.*]] = getelementptr inbounds i32, i32* [[P1]], i64 1
-; CHECK-NEXT:    [[ADD_PTR3:%.*]] = getelementptr inbounds i32, i32* [[P0]], i64 1
+; CHECK-NEXT:    [[ADD_PTR2:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 1
+; CHECK-NEXT:    [[ADD_PTR3:%.*]] = getelementptr inbounds i32, ptr [[P0]], i64 1
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !5)
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !8)
-; CHECK-NEXT:    [[ADD_PTR_I:%.*]] = getelementptr inbounds i32, i32* [[ADD_PTR2]], i64 2
-; CHECK-NEXT:    [[ADD_PTR1_I:%.*]] = getelementptr inbounds i32, i32* [[ADD_PTR3]], i64 2
+; CHECK-NEXT:    [[ADD_PTR_I:%.*]] = getelementptr inbounds i32, ptr [[ADD_PTR2]], i64 2
+; CHECK-NEXT:    [[ADD_PTR1_I:%.*]] = getelementptr inbounds i32, ptr [[ADD_PTR3]], i64 2
 ; CHECK-NEXT:    tail call void @llvm.experimental.noalias.scope.decl(metadata !10)
 ; CHECK-NEXT:    tail call void @llvm.experimental.noalias.scope.decl(metadata !13)
-; CHECK-NEXT:    store i32 10, i32* [[ADD_PTR_I]], align 4, !alias.scope !15, !noalias !16
-; CHECK-NEXT:    store i32 20, i32* [[ADD_PTR1_I]], align 4, !alias.scope !16, !noalias !15
-; CHECK-NEXT:    store i32 11, i32* [[ADD_PTR2]], align 4, !alias.scope !5, !noalias !8
-; CHECK-NEXT:    store i32 12, i32* [[P1]], align 4
+; CHECK-NEXT:    store i32 10, ptr [[ADD_PTR_I]], align 4, !alias.scope !15, !noalias !16
+; CHECK-NEXT:    store i32 20, ptr [[ADD_PTR1_I]], align 4, !alias.scope !16, !noalias !15
+; CHECK-NEXT:    store i32 11, ptr [[ADD_PTR2]], align 4, !alias.scope !5, !noalias !8
+; CHECK-NEXT:    store i32 12, ptr [[P1]], align 4
 ; CHECK-NEXT:    br label [[IF_END]]
 ; CHECK:       if.end:
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %add.ptr = getelementptr inbounds i32, i32* %p0, i64 2
-  %add.ptr1 = getelementptr inbounds i32, i32* %p1, i64 2
+  %add.ptr = getelementptr inbounds i32, ptr %p0, i64 2
+  %add.ptr1 = getelementptr inbounds i32, ptr %p1, i64 2
   tail call void @llvm.experimental.noalias.scope.decl(metadata !0)
   tail call void @llvm.experimental.noalias.scope.decl(metadata !3)
-  store i32 10, i32* %add.ptr, align 4, !alias.scope !0, !noalias !3
-  store i32 20, i32* %add.ptr1, align 4, !alias.scope !3, !noalias !0
+  store i32 10, ptr %add.ptr, align 4, !alias.scope !0, !noalias !3
+  store i32 20, ptr %add.ptr1, align 4, !alias.scope !3, !noalias !0
   %cmp = icmp eq i32 %cnt, 0
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i32 11, i32* %p0, align 4
+  store i32 11, ptr %p0, align 4
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  %add.ptr2 = getelementptr inbounds i32, i32* %p1, i64 1
-  %add.ptr3 = getelementptr inbounds i32, i32* %p0, i64 1
-  tail call void @caller_equals_callee(i32* nonnull %add.ptr2, i32* nonnull %add.ptr3, i32 0)
-  store i32 12, i32* %p1, align 4
+  %add.ptr2 = getelementptr inbounds i32, ptr %p1, i64 1
+  %add.ptr3 = getelementptr inbounds i32, ptr %p0, i64 1
+  tail call void @caller_equals_callee(ptr nonnull %add.ptr2, ptr nonnull %add.ptr3, i32 0)
+  store i32 12, ptr %p1, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
   ret void
 }
 
-define void @test01(i32* noalias %p0, i32* noalias %p1, i32 %cnt) {
+define void @test01(ptr noalias %p0, ptr noalias %p1, i32 %cnt) {
 ; CHECK-LABEL: define {{[^@]+}}@test01
-; CHECK-SAME: (i32* noalias [[P0:%.*]], i32* noalias [[P1:%.*]], i32 [[CNT:%.*]]) {
+; CHECK-SAME: (ptr noalias [[P0:%.*]], ptr noalias [[P1:%.*]], i32 [[CNT:%.*]]) {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i32 13, i32* [[P0]], align 4
-; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[P0]], i64 1
-; CHECK-NEXT:    [[ADD_PTR1:%.*]] = getelementptr inbounds i32, i32* [[P1]], i64 1
+; CHECK-NEXT:    store i32 13, ptr [[P0]], align 4
+; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[P0]], i64 1
+; CHECK-NEXT:    [[ADD_PTR1:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 1
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !17)
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !20)
-; CHECK-NEXT:    [[ADD_PTR_I:%.*]] = getelementptr inbounds i32, i32* [[ADD_PTR]], i64 2
-; CHECK-NEXT:    [[ADD_PTR1_I:%.*]] = getelementptr inbounds i32, i32* [[ADD_PTR1]], i64 2
+; CHECK-NEXT:    [[ADD_PTR_I:%.*]] = getelementptr inbounds i32, ptr [[ADD_PTR]], i64 2
+; CHECK-NEXT:    [[ADD_PTR1_I:%.*]] = getelementptr inbounds i32, ptr [[ADD_PTR1]], i64 2
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !22)
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !25)
-; CHECK-NEXT:    store i32 10, i32* [[ADD_PTR_I]], align 4, !alias.scope !27, !noalias !28
-; CHECK-NEXT:    store i32 20, i32* [[ADD_PTR1_I]], align 4, !alias.scope !28, !noalias !27
+; CHECK-NEXT:    store i32 10, ptr [[ADD_PTR_I]], align 4, !alias.scope !27, !noalias !28
+; CHECK-NEXT:    store i32 20, ptr [[ADD_PTR1_I]], align 4, !alias.scope !28, !noalias !27
 ; CHECK-NEXT:    [[CMP_I:%.*]] = icmp eq i32 [[CNT]], 0
 ; CHECK-NEXT:    br i1 [[CMP_I]], label [[IF_THEN_I:%.*]], label [[IF_ELSE_I:%.*]]
 ; CHECK:       if.then.i:
-; CHECK-NEXT:    store i32 11, i32* [[ADD_PTR]], align 4, !alias.scope !17, !noalias !20
+; CHECK-NEXT:    store i32 11, ptr [[ADD_PTR]], align 4, !alias.scope !17, !noalias !20
 ; CHECK-NEXT:    br label [[CALLER_EQUALS_CALLEE_EXIT:%.*]]
 ; CHECK:       if.else.i:
-; CHECK-NEXT:    [[ADD_PTR2_I:%.*]] = getelementptr inbounds i32, i32* [[ADD_PTR1]], i64 1
-; CHECK-NEXT:    [[ADD_PTR3_I:%.*]] = getelementptr inbounds i32, i32* [[ADD_PTR]], i64 1
+; CHECK-NEXT:    [[ADD_PTR2_I:%.*]] = getelementptr inbounds i32, ptr [[ADD_PTR1]], i64 1
+; CHECK-NEXT:    [[ADD_PTR3_I:%.*]] = getelementptr inbounds i32, ptr [[ADD_PTR]], i64 1
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !29)
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !32)
-; CHECK-NEXT:    [[ADD_PTR_I_I:%.*]] = getelementptr inbounds i32, i32* [[ADD_PTR2_I]], i64 2
-; CHECK-NEXT:    [[ADD_PTR1_I_I:%.*]] = getelementptr inbounds i32, i32* [[ADD_PTR3_I]], i64 2
+; CHECK-NEXT:    [[ADD_PTR_I_I:%.*]] = getelementptr inbounds i32, ptr [[ADD_PTR2_I]], i64 2
+; CHECK-NEXT:    [[ADD_PTR1_I_I:%.*]] = getelementptr inbounds i32, ptr [[ADD_PTR3_I]], i64 2
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !34)
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !37)
-; CHECK-NEXT:    store i32 10, i32* [[ADD_PTR_I_I]], align 4, !alias.scope !39, !noalias !40
-; CHECK-NEXT:    store i32 20, i32* [[ADD_PTR1_I_I]], align 4, !alias.scope !40, !noalias !39
-; CHECK-NEXT:    store i32 11, i32* [[ADD_PTR2_I]], align 4, !alias.scope !41, !noalias !42
-; CHECK-NEXT:    store i32 12, i32* [[ADD_PTR1]], align 4, !alias.scope !20, !noalias !17
+; CHECK-NEXT:    store i32 10, ptr [[ADD_PTR_I_I]], align 4, !alias.scope !39, !noalias !40
+; CHECK-NEXT:    store i32 20, ptr [[ADD_PTR1_I_I]], align 4, !alias.scope !40, !noalias !39
+; CHECK-NEXT:    store i32 11, ptr [[ADD_PTR2_I]], align 4, !alias.scope !41, !noalias !42
+; CHECK-NEXT:    store i32 12, ptr [[ADD_PTR1]], align 4, !alias.scope !20, !noalias !17
 ; CHECK-NEXT:    br label [[CALLER_EQUALS_CALLEE_EXIT]]
 ; CHECK:       caller_equals_callee.exit:
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i32 13, i32* %p0, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p0, i64 1
-  %add.ptr1 = getelementptr inbounds i32, i32* %p1, i64 1
-  call void @caller_equals_callee(i32* nonnull %add.ptr, i32* nonnull %add.ptr1, i32 %cnt)
+  store i32 13, ptr %p0, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p0, i64 1
+  %add.ptr1 = getelementptr inbounds i32, ptr %p1, i64 1
+  call void @caller_equals_callee(ptr nonnull %add.ptr, ptr nonnull %add.ptr1, i32 %cnt)
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/noalias-cs.ll b/llvm/test/Transforms/Inline/noalias-cs.ll
index f534fca3a8bf4..948ed8f76ed39 100644
--- a/llvm/test/Transforms/Inline/noalias-cs.ll
+++ b/llvm/test/Transforms/Inline/noalias-cs.ll
@@ -4,111 +4,111 @@ target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
 ; This callee uses scoped alias metadata internally itself.
-define void @callee_with_metadata(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
+define void @callee_with_metadata(ptr nocapture %a, ptr nocapture %b, ptr nocapture readonly %c) #0 {
 ; CHECK-LABEL: @callee_with_metadata(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]])
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C:%.*]], align 4, !noalias !5
-; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 5
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX_I]], align 4, !alias.scope !0, !noalias !3
-; CHECK-NEXT:    [[ARRAYIDX1_I:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 8
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX1_I]], align 4, !alias.scope !3, !noalias !0
-; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
-; CHECK-NEXT:    store float [[TMP1]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C:%.*]], align 4, !noalias !5
+; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX_I]], align 4, !alias.scope !0, !noalias !3
+; CHECK-NEXT:    [[ARRAYIDX1_I:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 8
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX1_I]], align 4, !alias.scope !3, !noalias !0
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
+; CHECK-NEXT:    store float [[TMP1]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
   call void @llvm.experimental.noalias.scope.decl(metadata !7)
   call void @llvm.experimental.noalias.scope.decl(metadata !8)
-  %0 = load float, float* %c, align 4, !noalias !3
-  %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
-  store float %0, float* %arrayidx.i, align 4, !alias.scope !7, !noalias !8
-  %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
-  store float %0, float* %arrayidx1.i, align 4, !alias.scope !8, !noalias !7
-  %1 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 7
-  store float %1, float* %arrayidx, align 4
+  %0 = load float, ptr %c, align 4, !noalias !3
+  %arrayidx.i = getelementptr inbounds float, ptr %a, i64 5
+  store float %0, ptr %arrayidx.i, align 4, !alias.scope !7, !noalias !8
+  %arrayidx1.i = getelementptr inbounds float, ptr %b, i64 8
+  store float %0, ptr %arrayidx1.i, align 4, !alias.scope !8, !noalias !7
+  %1 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 7
+  store float %1, ptr %arrayidx, align 4
   ret void
 }
 
 declare void @llvm.experimental.noalias.scope.decl(metadata);
 
 ; This callee does not make use of scoped alias metadata itself.
-define void @callee_without_metadata(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
+define void @callee_without_metadata(ptr nocapture %a, ptr nocapture %b, ptr nocapture readonly %c) #0 {
 ; CHECK-LABEL: @callee_without_metadata(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C:%.*]], align 4
-; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 5
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
-; CHECK-NEXT:    [[ARRAYIDX1_I:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 8
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX1_I]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
-; CHECK-NEXT:    store float [[TMP1]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C:%.*]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX_I]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1_I:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 8
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX1_I]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
+; CHECK-NEXT:    store float [[TMP1]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = load float, float* %c, align 4
-  %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
-  store float %0, float* %arrayidx.i, align 4
-  %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
-  store float %0, float* %arrayidx1.i, align 4
-  %1 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 7
-  store float %1, float* %arrayidx, align 4
+  %0 = load float, ptr %c, align 4
+  %arrayidx.i = getelementptr inbounds float, ptr %a, i64 5
+  store float %0, ptr %arrayidx.i, align 4
+  %arrayidx1.i = getelementptr inbounds float, ptr %b, i64 8
+  store float %0, ptr %arrayidx1.i, align 4
+  %1 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 7
+  store float %1, ptr %arrayidx, align 4
   ret void
 }
 
-define void @caller(float* nocapture %a, float* nocapture %b, float** nocapture readonly %c_ptr) #0 {
+define void @caller(ptr nocapture %a, ptr nocapture %b, ptr nocapture readonly %c_ptr) #0 {
 ; CHECK-LABEL: @caller(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[C:%.*]] = load float*, float** [[C_PTR:%.*]], align 8, !alias.scope !6
+; CHECK-NEXT:    [[C:%.*]] = load ptr, ptr [[C_PTR:%.*]], align 8, !alias.scope !6
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]), !noalias !6
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]), !noalias !6
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4, !noalias !14
-; CHECK-NEXT:    [[ARRAYIDX_I_I:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 5
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX_I_I]], align 4, !alias.scope !9, !noalias !15
-; CHECK-NEXT:    [[ARRAYIDX1_I_I:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 8
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX1_I_I]], align 4, !alias.scope !12, !noalias !16
-; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[C]], align 4, !noalias !6
-; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
-; CHECK-NEXT:    store float [[TMP1]], float* [[ARRAYIDX_I]], align 4, !noalias !6
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4, !noalias !14
+; CHECK-NEXT:    [[ARRAYIDX_I_I:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX_I_I]], align 4, !alias.scope !9, !noalias !15
+; CHECK-NEXT:    [[ARRAYIDX1_I_I:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 8
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX1_I_I]], align 4, !alias.scope !12, !noalias !16
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[C]], align 4, !noalias !6
+; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
+; CHECK-NEXT:    store float [[TMP1]], ptr [[ARRAYIDX_I]], align 4, !noalias !6
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]), !alias.scope !6
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]), !alias.scope !6
-; CHECK-NEXT:    [[TMP2:%.*]] = load float, float* [[A]], align 4, !alias.scope !6, !noalias !22
-; CHECK-NEXT:    [[ARRAYIDX_I_I1:%.*]] = getelementptr inbounds float, float* [[B]], i64 5
-; CHECK-NEXT:    store float [[TMP2]], float* [[ARRAYIDX_I_I1]], align 4, !alias.scope !23, !noalias !20
-; CHECK-NEXT:    [[ARRAYIDX1_I_I2:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
-; CHECK-NEXT:    store float [[TMP2]], float* [[ARRAYIDX1_I_I2]], align 4, !alias.scope !24, !noalias !17
-; CHECK-NEXT:    [[TMP3:%.*]] = load float, float* [[A]], align 4, !alias.scope !6
-; CHECK-NEXT:    [[ARRAYIDX_I3:%.*]] = getelementptr inbounds float, float* [[B]], i64 7
-; CHECK-NEXT:    store float [[TMP3]], float* [[ARRAYIDX_I3]], align 4, !alias.scope !6
-; CHECK-NEXT:    [[TMP4:%.*]] = load float, float* [[C]], align 4, !noalias !6
-; CHECK-NEXT:    [[ARRAYIDX_I_I4:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
-; CHECK-NEXT:    store float [[TMP4]], float* [[ARRAYIDX_I_I4]], align 4, !noalias !6
-; CHECK-NEXT:    [[ARRAYIDX1_I_I5:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
-; CHECK-NEXT:    store float [[TMP4]], float* [[ARRAYIDX1_I_I5]], align 4, !noalias !6
-; CHECK-NEXT:    [[TMP5:%.*]] = load float, float* [[C]], align 4, !noalias !6
-; CHECK-NEXT:    [[ARRAYIDX_I6:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
-; CHECK-NEXT:    store float [[TMP5]], float* [[ARRAYIDX_I6]], align 4, !noalias !6
-; CHECK-NEXT:    [[TMP6:%.*]] = load float, float* [[A]], align 4, !alias.scope !6
-; CHECK-NEXT:    [[ARRAYIDX_I_I7:%.*]] = getelementptr inbounds float, float* [[B]], i64 5
-; CHECK-NEXT:    store float [[TMP6]], float* [[ARRAYIDX_I_I7]], align 4, !alias.scope !6
-; CHECK-NEXT:    [[ARRAYIDX1_I_I8:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
-; CHECK-NEXT:    store float [[TMP6]], float* [[ARRAYIDX1_I_I8]], align 4, !alias.scope !6
-; CHECK-NEXT:    [[TMP7:%.*]] = load float, float* [[A]], align 4, !alias.scope !6
-; CHECK-NEXT:    [[ARRAYIDX_I9:%.*]] = getelementptr inbounds float, float* [[B]], i64 7
-; CHECK-NEXT:    store float [[TMP7]], float* [[ARRAYIDX_I9]], align 4, !alias.scope !6
+; CHECK-NEXT:    [[TMP2:%.*]] = load float, ptr [[A]], align 4, !alias.scope !6, !noalias !22
+; CHECK-NEXT:    [[ARRAYIDX_I_I1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 5
+; CHECK-NEXT:    store float [[TMP2]], ptr [[ARRAYIDX_I_I1]], align 4, !alias.scope !23, !noalias !20
+; CHECK-NEXT:    [[ARRAYIDX1_I_I2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
+; CHECK-NEXT:    store float [[TMP2]], ptr [[ARRAYIDX1_I_I2]], align 4, !alias.scope !24, !noalias !17
+; CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr [[A]], align 4, !alias.scope !6
+; CHECK-NEXT:    [[ARRAYIDX_I3:%.*]] = getelementptr inbounds float, ptr [[B]], i64 7
+; CHECK-NEXT:    store float [[TMP3]], ptr [[ARRAYIDX_I3]], align 4, !alias.scope !6
+; CHECK-NEXT:    [[TMP4:%.*]] = load float, ptr [[C]], align 4, !noalias !6
+; CHECK-NEXT:    [[ARRAYIDX_I_I4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP4]], ptr [[ARRAYIDX_I_I4]], align 4, !noalias !6
+; CHECK-NEXT:    [[ARRAYIDX1_I_I5:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
+; CHECK-NEXT:    store float [[TMP4]], ptr [[ARRAYIDX1_I_I5]], align 4, !noalias !6
+; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[C]], align 4, !noalias !6
+; CHECK-NEXT:    [[ARRAYIDX_I6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
+; CHECK-NEXT:    store float [[TMP5]], ptr [[ARRAYIDX_I6]], align 4, !noalias !6
+; CHECK-NEXT:    [[TMP6:%.*]] = load float, ptr [[A]], align 4, !alias.scope !6
+; CHECK-NEXT:    [[ARRAYIDX_I_I7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 5
+; CHECK-NEXT:    store float [[TMP6]], ptr [[ARRAYIDX_I_I7]], align 4, !alias.scope !6
+; CHECK-NEXT:    [[ARRAYIDX1_I_I8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
+; CHECK-NEXT:    store float [[TMP6]], ptr [[ARRAYIDX1_I_I8]], align 4, !alias.scope !6
+; CHECK-NEXT:    [[TMP7:%.*]] = load float, ptr [[A]], align 4, !alias.scope !6
+; CHECK-NEXT:    [[ARRAYIDX_I9:%.*]] = getelementptr inbounds float, ptr [[B]], i64 7
+; CHECK-NEXT:    store float [[TMP7]], ptr [[ARRAYIDX_I9]], align 4, !alias.scope !6
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %c = load float*, float** %c_ptr, !alias.scope !0
-  call void @callee_with_metadata(float* %a, float* %b, float* %c), !noalias !0
-  call void @callee_with_metadata(float* %b, float* %b, float* %a), !alias.scope !0
-  call void @callee_without_metadata(float* %a, float* %b, float* %c), !noalias !0
-  call void @callee_without_metadata(float* %b, float* %b, float* %a), !alias.scope !0
+  %c = load ptr, ptr %c_ptr, !alias.scope !0
+  call void @callee_with_metadata(ptr %a, ptr %b, ptr %c), !noalias !0
+  call void @callee_with_metadata(ptr %b, ptr %b, ptr %a), !alias.scope !0
+  call void @callee_without_metadata(ptr %a, ptr %b, ptr %c), !noalias !0
+  call void @callee_without_metadata(ptr %b, ptr %b, ptr %a), !alias.scope !0
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/noalias.ll b/llvm/test/Transforms/Inline/noalias.ll
index b85e902ff93e8..e25513080bedd 100644
--- a/llvm/test/Transforms/Inline/noalias.ll
+++ b/llvm/test/Transforms/Inline/noalias.ll
@@ -2,66 +2,66 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-define void @hello(float* noalias nocapture %a, float* nocapture readonly %c) #0 {
+define void @hello(ptr noalias nocapture %a, ptr nocapture readonly %c) #0 {
 entry:
-  %0 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 5
-  store float %0, float* %arrayidx, align 4
+  %0 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 5
+  store float %0, ptr %arrayidx, align 4
   ret void
 }
 
-define void @foo(float* nocapture %a, float* nocapture readonly %c) #0 {
+define void @foo(ptr nocapture %a, ptr nocapture readonly %c) #0 {
 entry:
-  tail call void @hello(float* %a, float* %c)
-  %0 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 7
-  store float %0, float* %arrayidx, align 4
+  tail call void @hello(ptr %a, ptr %c)
+  %0 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 7
+  store float %0, ptr %arrayidx, align 4
   ret void
 }
 
-; CHECK-LABEL: define void @foo(float* nocapture %a, float* nocapture readonly %c) #0 {
+; CHECK-LABEL: define void @foo(ptr nocapture %a, ptr nocapture readonly %c) #0 {
 ; CHECK: entry:
 ; CHECK:   call void @llvm.experimental.noalias.scope.decl
-; CHECK:   [[TMP0:%.+]] = load float, float* %c, align 4, !noalias !0
-; CHECK:   %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
-; CHECK:   store float [[TMP0]], float* %arrayidx.i, align 4, !alias.scope !0
-; CHECK:   [[TMP1:%.+]] = load float, float* %c, align 4
-; CHECK:   %arrayidx = getelementptr inbounds float, float* %a, i64 7
-; CHECK:   store float [[TMP1]], float* %arrayidx, align 4
+; CHECK:   [[TMP0:%.+]] = load float, ptr %c, align 4, !noalias !0
+; CHECK:   %arrayidx.i = getelementptr inbounds float, ptr %a, i64 5
+; CHECK:   store float [[TMP0]], ptr %arrayidx.i, align 4, !alias.scope !0
+; CHECK:   [[TMP1:%.+]] = load float, ptr %c, align 4
+; CHECK:   %arrayidx = getelementptr inbounds float, ptr %a, i64 7
+; CHECK:   store float [[TMP1]], ptr %arrayidx, align 4
 ; CHECK:   ret void
 ; CHECK: }
 
-define void @hello2(float* noalias nocapture %a, float* noalias nocapture %b, float* nocapture readonly %c) #0 {
+define void @hello2(ptr noalias nocapture %a, ptr noalias nocapture %b, ptr nocapture readonly %c) #0 {
 entry:
-  %0 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 5
-  store float %0, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %b, i64 8
-  store float %0, float* %arrayidx1, align 4
+  %0 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 5
+  store float %0, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %b, i64 8
+  store float %0, ptr %arrayidx1, align 4
   ret void
 }
 
-define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
+define void @foo2(ptr nocapture %a, ptr nocapture %b, ptr nocapture readonly %c) #0 {
 entry:
-  tail call void @hello2(float* %a, float* %b, float* %c)
-  %0 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 7
-  store float %0, float* %arrayidx, align 4
+  tail call void @hello2(ptr %a, ptr %b, ptr %c)
+  %0 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 7
+  store float %0, ptr %arrayidx, align 4
   ret void
 }
 
-; CHECK-LABEL: define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
+; CHECK-LABEL: define void @foo2(ptr nocapture %a, ptr nocapture %b, ptr nocapture readonly %c) #0 {
 ; CHECK: entry:
 ; CHECK:   call void @llvm.experimental.noalias.scope.decl(metadata !3)
 ; CHECK:   call void @llvm.experimental.noalias.scope.decl(metadata !6)
-; CHECK:   [[TMP0:%.+]] = load float, float* %c, align 4, !noalias !8
-; CHECK:   %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
-; CHECK:   store float [[TMP0]], float* %arrayidx.i, align 4, !alias.scope !3, !noalias !6
-; CHECK:   %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
-; CHECK:   store float [[TMP0]], float* %arrayidx1.i, align 4, !alias.scope !6, !noalias !3
-; CHECK:   [[TMP1:%.+]] = load float, float* %c, align 4
-; CHECK:   %arrayidx = getelementptr inbounds float, float* %a, i64 7
-; CHECK:   store float [[TMP1]], float* %arrayidx, align 4
+; CHECK:   [[TMP0:%.+]] = load float, ptr %c, align 4, !noalias !8
+; CHECK:   %arrayidx.i = getelementptr inbounds float, ptr %a, i64 5
+; CHECK:   store float [[TMP0]], ptr %arrayidx.i, align 4, !alias.scope !3, !noalias !6
+; CHECK:   %arrayidx1.i = getelementptr inbounds float, ptr %b, i64 8
+; CHECK:   store float [[TMP0]], ptr %arrayidx1.i, align 4, !alias.scope !6, !noalias !3
+; CHECK:   [[TMP1:%.+]] = load float, ptr %c, align 4
+; CHECK:   %arrayidx = getelementptr inbounds float, ptr %a, i64 7
+; CHECK:   store float [[TMP1]], ptr %arrayidx, align 4
 ; CHECK:   ret void
 ; CHECK: }
 

diff  --git a/llvm/test/Transforms/Inline/noalias2.ll b/llvm/test/Transforms/Inline/noalias2.ll
index 451d7b27fd73f..7e43598f9502b 100644
--- a/llvm/test/Transforms/Inline/noalias2.ll
+++ b/llvm/test/Transforms/Inline/noalias2.ll
@@ -5,98 +5,98 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-define void @hello(float* noalias nocapture %a, float* noalias nocapture readonly %c) #0 {
+define void @hello(ptr noalias nocapture %a, ptr noalias nocapture readonly %c) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@hello
-; CHECK-SAME: (float* noalias nocapture [[A:%.*]], float* noalias nocapture readonly [[C:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: (ptr noalias nocapture [[A:%.*]], ptr noalias nocapture readonly [[C:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 5
-  store float %0, float* %arrayidx, align 4
+  %0 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 5
+  store float %0, ptr %arrayidx, align 4
   ret void
 }
 
-define void @foo(float* noalias nocapture %a, float* noalias nocapture readonly %c) #0 {
+define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %c) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@foo
-; CHECK-SAME: (float* noalias nocapture [[A:%.*]], float* noalias nocapture readonly [[C:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: (ptr noalias nocapture [[A:%.*]], ptr noalias nocapture readonly [[C:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]])
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4, !alias.scope !3, !noalias !0
-; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX_I]], align 4, !alias.scope !0, !noalias !3
-; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
-; CHECK-NEXT:    store float [[TMP1]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4, !alias.scope !3, !noalias !0
+; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, ptr [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX_I]], align 4, !alias.scope !0, !noalias !3
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
+; CHECK-NEXT:    store float [[TMP1]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  tail call void @hello(float* %a, float* %c)
-  %0 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 7
-  store float %0, float* %arrayidx, align 4
+  tail call void @hello(ptr %a, ptr %c)
+  %0 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 7
+  store float %0, ptr %arrayidx, align 4
   ret void
 }
 
-define void @hello2(float* noalias nocapture %a, float* noalias nocapture %b, float* nocapture readonly %c) #0 {
+define void @hello2(ptr noalias nocapture %a, ptr noalias nocapture %b, ptr nocapture readonly %c) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@hello2
-; CHECK-SAME: (float* noalias nocapture [[A:%.*]], float* noalias nocapture [[B:%.*]], float* nocapture readonly [[C:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: (ptr noalias nocapture [[A:%.*]], ptr noalias nocapture [[B:%.*]], ptr nocapture readonly [[C:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 6
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX]], align 4
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 6
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 6
-  store float %0, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %b, i64 8
-  store float %0, float* %arrayidx1, align 4
+  %0 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 6
+  store float %0, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %b, i64 8
+  store float %0, ptr %arrayidx1, align 4
   ret void
 }
 
 ; Check that when hello() is inlined into foo(), and then foo() is inlined into
 ; foo2(), the noalias scopes are properly concatenated.
-define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
+define void @foo2(ptr nocapture %a, ptr nocapture %b, ptr nocapture readonly %c) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@foo2
-; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture [[B:%.*]], float* nocapture readonly [[C:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: (ptr nocapture [[A:%.*]], ptr nocapture [[B:%.*]], ptr nocapture readonly [[C:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META5:![0-9]+]])
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]])
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4, !alias.scope !15, !noalias !16
-; CHECK-NEXT:    [[ARRAYIDX_I_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
-; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX_I_I]], align 4, !alias.scope !16, !noalias !15
-; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[C]], align 4, !alias.scope !8, !noalias !5
-; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
-; CHECK-NEXT:    store float [[TMP1]], float* [[ARRAYIDX_I]], align 4, !alias.scope !5, !noalias !8
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4, !alias.scope !15, !noalias !16
+; CHECK-NEXT:    [[ARRAYIDX_I_I:%.*]] = getelementptr inbounds float, ptr [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], ptr [[ARRAYIDX_I_I]], align 4, !alias.scope !16, !noalias !15
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[C]], align 4, !alias.scope !8, !noalias !5
+; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
+; CHECK-NEXT:    store float [[TMP1]], ptr [[ARRAYIDX_I]], align 4, !alias.scope !5, !noalias !8
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
-; CHECK-NEXT:    [[TMP2:%.*]] = load float, float* [[C]], align 4, !noalias !22
-; CHECK-NEXT:    [[ARRAYIDX_I1:%.*]] = getelementptr inbounds float, float* [[A]], i64 6
-; CHECK-NEXT:    store float [[TMP2]], float* [[ARRAYIDX_I1]], align 4, !alias.scope !17, !noalias !20
-; CHECK-NEXT:    [[ARRAYIDX1_I:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
-; CHECK-NEXT:    store float [[TMP2]], float* [[ARRAYIDX1_I]], align 4, !alias.scope !20, !noalias !17
-; CHECK-NEXT:    [[TMP3:%.*]] = load float, float* [[C]], align 4
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
-; CHECK-NEXT:    store float [[TMP3]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load float, ptr [[C]], align 4, !noalias !22
+; CHECK-NEXT:    [[ARRAYIDX_I1:%.*]] = getelementptr inbounds float, ptr [[A]], i64 6
+; CHECK-NEXT:    store float [[TMP2]], ptr [[ARRAYIDX_I1]], align 4, !alias.scope !17, !noalias !20
+; CHECK-NEXT:    [[ARRAYIDX1_I:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
+; CHECK-NEXT:    store float [[TMP2]], ptr [[ARRAYIDX1_I]], align 4, !alias.scope !20, !noalias !17
+; CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
+; CHECK-NEXT:    store float [[TMP3]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  tail call void @foo(float* %a, float* %c)
-  tail call void @hello2(float* %a, float* %b, float* %c)
-  %0 = load float, float* %c, align 4
-  %arrayidx = getelementptr inbounds float, float* %a, i64 7
-  store float %0, float* %arrayidx, align 4
+  tail call void @foo(ptr %a, ptr %c)
+  tail call void @hello2(ptr %a, ptr %b, ptr %c)
+  %0 = load float, ptr %c, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i64 7
+  store float %0, ptr %arrayidx, align 4
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/noalias3.ll b/llvm/test/Transforms/Inline/noalias3.ll
index 2b73e410eba5e..b9054363fe08a 100644
--- a/llvm/test/Transforms/Inline/noalias3.ll
+++ b/llvm/test/Transforms/Inline/noalias3.ll
@@ -1,28 +1,28 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature
 ; RUN: opt -passes=inline -S < %s | FileCheck %s
 
-define void @caller(i8* %ptr) {
+define void @caller(ptr %ptr) {
 ; CHECK-LABEL: define {{[^@]+}}@caller
-; CHECK-SAME: (i8* [[PTR:%.*]]) {
-; CHECK-NEXT:    [[I_I:%.*]] = load i8, i8* [[PTR]], align 1, !alias.scope !0
+; CHECK-SAME: (ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[I_I:%.*]] = load i8, ptr [[PTR]], align 1, !alias.scope !0
 ; CHECK-NEXT:    ret void
 ;
-  call void @callee(i8* %ptr)
+  call void @callee(ptr %ptr)
   ret void
 }
 
-define void @callee(i8* %ptr) {
+define void @callee(ptr %ptr) {
 ; CHECK-LABEL: define {{[^@]+}}@callee
-; CHECK-SAME: (i8* [[PTR:%.*]]) {
+; CHECK-SAME: (ptr [[PTR:%.*]]) {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[I:%.*]] = load i8, i8* [[PTR]], align 1, !alias.scope !3
+; CHECK-NEXT:    [[I:%.*]] = load i8, ptr [[PTR]], align 1, !alias.scope !3
 ; CHECK-NEXT:    br label [[DUMMY:%.*]]
 ; CHECK:       dummy:
 ; CHECK-NEXT:    [[I_COPY:%.*]] = phi i8 [ [[I]], [[ENTRY:%.*]] ]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %i = load i8, i8* %ptr, !alias.scope !0
+  %i = load i8, ptr %ptr, !alias.scope !0
   br label %dummy
 
 dummy:

diff  --git a/llvm/test/Transforms/Inline/noinline-recursive-fn.ll b/llvm/test/Transforms/Inline/noinline-recursive-fn.ll
index 862eadd1700a7..ecfeae8e5cdb7 100644
--- a/llvm/test/Transforms/Inline/noinline-recursive-fn.ll
+++ b/llvm/test/Transforms/Inline/noinline-recursive-fn.ll
@@ -8,7 +8,7 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-darwin10.3"
 
- at g = common global i32 0                          ; <i32*> [#uses=1]
+ at g = common global i32 0                          ; <ptr> [#uses=1]
 
 define internal void @foo(i32 %x) nounwind ssp {
 entry:
@@ -18,7 +18,7 @@ entry:
 bb:                                               ; preds = %entry
   %1 = sub nsw i32 %x, 1                          ; <i32> [#uses=1]
   call void @foo(i32 %1) nounwind ssp
-  store volatile i32 1, i32* @g, align 4
+  store volatile i32 1, ptr @g, align 4
   ret void
 
 return:                                           ; preds = %entry
@@ -38,24 +38,22 @@ entry:
 
 ;; Here is an indirect case that should not be infinitely inlined.
 
-define internal void @f1(i32 %x, i8* %Foo, i8* %Bar) nounwind ssp {
+define internal void @f1(i32 %x, ptr %Foo, ptr %Bar) nounwind ssp {
 entry:
-  %0 = bitcast i8* %Bar to void (i32, i8*, i8*)*
-  %1 = sub nsw i32 %x, 1
-  call void %0(i32 %1, i8* %Foo, i8* %Bar) nounwind
-  store volatile i32 42, i32* @g, align 4
+  %0 = sub nsw i32 %x, 1
+  call void %Bar(i32 %0, ptr %Foo, ptr %Bar) nounwind
+  store volatile i32 42, ptr @g, align 4
   ret void
 }
 
-define internal void @f2(i32 %x, i8* %Foo, i8* %Bar) nounwind ssp {
+define internal void @f2(i32 %x, ptr %Foo, ptr %Bar) nounwind ssp {
 entry:
   %0 = icmp slt i32 %x, 0                         ; <i1> [#uses=1]
   br i1 %0, label %return, label %bb
 
 bb:                                               ; preds = %entry
-  %1 = bitcast i8* %Foo to void (i32, i8*, i8*)*  ; <void (i32, i8*, i8*)*> [#uses=1]
-  call void %1(i32 %x, i8* %Foo, i8* %Bar) nounwind
-  store volatile i32 13, i32* @g, align 4
+  call void %Foo(i32 %x, ptr %Foo, ptr %Bar) nounwind
+  store volatile i32 13, ptr @g, align 4
   ret void
 
 return:                                           ; preds = %entry
@@ -69,7 +67,7 @@ return:                                           ; preds = %entry
 ; unroll it.
 define void @top_level() nounwind ssp {
 entry:
-  call void @f2(i32 123, i8* bitcast (void (i32, i8*, i8*)* @f1 to i8*), i8* bitcast (void (i32, i8*, i8*)* @f2 to i8*)) nounwind ssp
+  call void @f2(i32 123, ptr @f1, ptr @f2) nounwind ssp
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/nonnull.ll b/llvm/test/Transforms/Inline/nonnull.ll
index 7d7a68d54aeeb..64c4e4d3a7f40 100644
--- a/llvm/test/Transforms/Inline/nonnull.ll
+++ b/llvm/test/Transforms/Inline/nonnull.ll
@@ -5,8 +5,8 @@
 declare void @foo()
 declare void @bar()
 
-define void @callee(i8* %arg) {
-  %cmp = icmp eq i8* %arg, null
+define void @callee(ptr %arg) {
+  %cmp = icmp eq ptr %arg, null
   br i1 %cmp, label %expensive, label %done
 
 ; This block is designed to be too expensive to inline.  We can only inline
@@ -30,18 +30,18 @@ done:
 }
 
 ; Positive test - arg is known non null
-define void @caller(i8* nonnull %arg) {
+define void @caller(ptr nonnull %arg) {
 ; CHECK-LABEL: @caller
 ; CHECK: call void @bar()
-  call void @callee(i8* nonnull %arg)
+  call void @callee(ptr nonnull %arg)
   ret void
 }
 
 ; Negative test - arg is not known to be non null
-define void @caller2(i8* %arg) {
+define void @caller2(ptr %arg) {
 ; CHECK-LABEL: @caller2
 ; CHECK: call void @callee(
-  call void @callee(i8* %arg)
+  call void @callee(ptr %arg)
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/optimization-remarks.ll b/llvm/test/Transforms/Inline/optimization-remarks.ll
index bdcfb12830582..bc1e690ee61fe 100644
--- a/llvm/test/Transforms/Inline/optimization-remarks.ll
+++ b/llvm/test/Transforms/Inline/optimization-remarks.ll
@@ -36,10 +36,10 @@ define i32 @foo(i32 %x, i32 %y) #0 !prof !1 {
 entry:
   %x.addr = alloca i32, align 4
   %y.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  store i32 %y, i32* %y.addr, align 4
-  %0 = load i32, i32* %x.addr, align 4
-  %1 = load i32, i32* %y.addr, align 4
+  store i32 %x, ptr %x.addr, align 4
+  store i32 %y, ptr %y.addr, align 4
+  %0 = load i32, ptr %x.addr, align 4
+  %1 = load i32, ptr %y.addr, align 4
   %add = add nsw i32 %0, %1
   ret i32 %add
 }
@@ -49,10 +49,10 @@ define float @foz(i32 %x, i32 %y) #1 !prof !1 {
 entry:
   %x.addr = alloca i32, align 4
   %y.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  store i32 %y, i32* %y.addr, align 4
-  %0 = load i32, i32* %x.addr, align 4
-  %1 = load i32, i32* %y.addr, align 4
+  store i32 %x, ptr %x.addr, align 4
+  store i32 %y, ptr %y.addr, align 4
+  %0 = load i32, ptr %x.addr, align 4
+  %1 = load i32, ptr %y.addr, align 4
   %mul = mul nsw i32 %0, %1
   %conv = sitofp i32 %mul to float
   ret float %conv
@@ -64,15 +64,15 @@ declare i32 @fox()
 define i32 @bar(i32 %j) #2 !prof !1 {
 entry:
   %j.addr = alloca i32, align 4
-  store i32 %j, i32* %j.addr, align 4
-  %0 = load i32, i32* %j.addr, align 4
-  %1 = load i32, i32* %j.addr, align 4
+  store i32 %j, ptr %j.addr, align 4
+  %0 = load i32, ptr %j.addr, align 4
+  %1 = load i32, ptr %j.addr, align 4
   %sub = sub nsw i32 %1, 2
   %call = call i32 @foo(i32 %0, i32 %sub)
   %conv = sitofp i32 %call to float
-  %2 = load i32, i32* %j.addr, align 4
+  %2 = load i32, ptr %j.addr, align 4
   %sub1 = sub nsw i32 %2, 2
-  %3 = load i32, i32* %j.addr, align 4
+  %3 = load i32, ptr %j.addr, align 4
   %call2 = call float @foz(i32 %sub1, i32 %3)
   %mul = fmul float %conv, %call2
   %conv3 = fptosi float %mul to i32

diff  --git a/llvm/test/Transforms/Inline/parallel-loop-md-callee.ll b/llvm/test/Transforms/Inline/parallel-loop-md-callee.ll
index 41d7530a41e43..79eb2f463e2d0 100644
--- a/llvm/test/Transforms/Inline/parallel-loop-md-callee.ll
+++ b/llvm/test/Transforms/Inline/parallel-loop-md-callee.ll
@@ -4,29 +4,29 @@
 ;
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
-define void @Body(i32* nocapture %res, i32* nocapture readnone %c, i32* nocapture readonly %d, i32* nocapture readonly %p, i32 %i) {
+define void @Body(ptr nocapture %res, ptr nocapture readnone %c, ptr nocapture readonly %d, ptr nocapture readonly %p, i32 %i) {
 entry:
   %idxprom = sext i32 %i to i64
-  %arrayidx = getelementptr inbounds i32, i32* %p, i64 %idxprom
-  %0 = load i32, i32* %arrayidx, align 4, !llvm.access.group !0
+  %arrayidx = getelementptr inbounds i32, ptr %p, i64 %idxprom
+  %0 = load i32, ptr %arrayidx, align 4, !llvm.access.group !0
   %cmp = icmp eq i32 %0, 0
-  %arrayidx2 = getelementptr inbounds i32, i32* %res, i64 %idxprom
-  %1 = load i32, i32* %arrayidx2, align 4, !llvm.access.group !0
+  %arrayidx2 = getelementptr inbounds i32, ptr %res, i64 %idxprom
+  %1 = load i32, ptr %arrayidx2, align 4, !llvm.access.group !0
   br i1 %cmp, label %cond.end, label %cond.false
 
 cond.false:
-  %arrayidx6 = getelementptr inbounds i32, i32* %d, i64 %idxprom
-  %2 = load i32, i32* %arrayidx6, align 4, !llvm.access.group !0
+  %arrayidx6 = getelementptr inbounds i32, ptr %d, i64 %idxprom
+  %2 = load i32, ptr %arrayidx6, align 4, !llvm.access.group !0
   %add = add nsw i32 %2, %1
   br label %cond.end
 
 cond.end:
   %cond = phi i32 [ %add, %cond.false ], [ %1, %entry ]
-  store i32 %cond, i32* %arrayidx2, align 4
+  store i32 %cond, ptr %arrayidx2, align 4
   ret void
 }
 
-define void @Test(i32* %res, i32* %c, i32* %d, i32* %p, i32 %n) {
+define void @Test(ptr %res, ptr %c, ptr %d, ptr %p, i32 %n) {
 entry:
   br label %for.cond
 
@@ -36,7 +36,7 @@ for.cond:
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:
-  call void @Body(i32* %res, i32* undef, i32* %d, i32* %p, i32 %i.0), !llvm.access.group !0
+  call void @Body(ptr %res, ptr undef, ptr %d, ptr %p, i32 %i.0), !llvm.access.group !0
   %inc = add nsw i32 %i.0, 1
   br label %for.cond, !llvm.loop !1
 

diff  --git a/llvm/test/Transforms/Inline/parallel-loop-md-merge.ll b/llvm/test/Transforms/Inline/parallel-loop-md-merge.ll
index 26ccb7f8e00f5..d34a99a2d0555 100644
--- a/llvm/test/Transforms/Inline/parallel-loop-md-merge.ll
+++ b/llvm/test/Transforms/Inline/parallel-loop-md-merge.ll
@@ -14,7 +14,7 @@
 ;
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
-define internal void @callee(i64 %n, double* noalias nonnull %A, i64 %i) #0 {
+define internal void @callee(i64 %n, ptr noalias nonnull %A, i64 %i) #0 {
 entry:
   br label %for.cond
 
@@ -26,8 +26,8 @@ for.cond:
 for.body:
   %mul = mul nsw i64 %i, %n
   %add = add nsw i64 %mul, %j.0
-  %arrayidx = getelementptr inbounds double, double* %A, i64 %add
-  store double 4.200000e+01, double* %arrayidx, align 8, !llvm.access.group !6
+  %arrayidx = getelementptr inbounds double, ptr %A, i64 %add
+  store double 4.200000e+01, ptr %arrayidx, align 8, !llvm.access.group !6
   %add1 = add nuw nsw i64 %j.0, 1
   br label %for.cond, !llvm.loop !7
 
@@ -42,7 +42,7 @@ attributes #0 = { alwaysinline }
 !9 = !{!"llvm.loop.parallel_accesses", !6}
 
 
-define void @caller(i64 %n, double* noalias nonnull %A) {
+define void @caller(i64 %n, ptr noalias nonnull %A) {
 entry:
   br label %for.cond
 
@@ -52,7 +52,7 @@ for.cond:
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:
-  call void @callee(i64 %n, double* %A, i64 %i.0), !llvm.access.group !10
+  call void @callee(i64 %n, ptr %A, i64 %i.0), !llvm.access.group !10
   %add = add nuw nsw i64 %i.0, 1
   br label %for.cond, !llvm.loop !11
 

diff  --git a/llvm/test/Transforms/Inline/parallel-loop-md.ll b/llvm/test/Transforms/Inline/parallel-loop-md.ll
index a13728ba33b6b..d1991975e2a8e 100644
--- a/llvm/test/Transforms/Inline/parallel-loop-md.ll
+++ b/llvm/test/Transforms/Inline/parallel-loop-md.ll
@@ -4,30 +4,30 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
 ; Function Attrs: norecurse nounwind uwtable
-define void @Body(i32* nocapture %res, i32* nocapture readnone %c, i32* nocapture readonly %d, i32* nocapture readonly %p, i32 %i) #0 {
+define void @Body(ptr nocapture %res, ptr nocapture readnone %c, ptr nocapture readonly %d, ptr nocapture readonly %p, i32 %i) #0 {
 entry:
   %idxprom = sext i32 %i to i64
-  %arrayidx = getelementptr inbounds i32, i32* %p, i64 %idxprom
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %p, i64 %idxprom
+  %0 = load i32, ptr %arrayidx, align 4
   %cmp = icmp eq i32 %0, 0
-  %arrayidx2 = getelementptr inbounds i32, i32* %res, i64 %idxprom
-  %1 = load i32, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %res, i64 %idxprom
+  %1 = load i32, ptr %arrayidx2, align 4
   br i1 %cmp, label %cond.end, label %cond.false
 
 cond.false:                                       ; preds = %entry
-  %arrayidx6 = getelementptr inbounds i32, i32* %d, i64 %idxprom
-  %2 = load i32, i32* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr %d, i64 %idxprom
+  %2 = load i32, ptr %arrayidx6, align 4
   %add = add nsw i32 %2, %1
   br label %cond.end
 
 cond.end:                                         ; preds = %entry, %cond.false
   %cond = phi i32 [ %add, %cond.false ], [ %1, %entry ]
-  store i32 %cond, i32* %arrayidx2, align 4
+  store i32 %cond, ptr %arrayidx2, align 4
   ret void
 }
 
 ; Function Attrs: nounwind uwtable
-define void @Test(i32* %res, i32* %c, i32* %d, i32* %p, i32 %n) #1 {
+define void @Test(ptr %res, ptr %c, ptr %d, ptr %p, i32 %n) #1 {
 entry:
   br label %for.cond
 
@@ -37,7 +37,7 @@ for.cond:                                         ; preds = %for.body, %entry
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  call void @Body(i32* %res, i32* undef, i32* %d, i32* %p, i32 %i.0), !llvm.access.group !0
+  call void @Body(ptr %res, ptr undef, ptr %d, ptr %p, i32 %i.0), !llvm.access.group !0
   %inc = add nsw i32 %i.0, 1
   br label %for.cond, !llvm.loop !1
 

diff  --git a/llvm/test/Transforms/Inline/partial-inline-act.ll b/llvm/test/Transforms/Inline/partial-inline-act.ll
index b29e929242443..141f1cd7aca70 100644
--- a/llvm/test/Transforms/Inline/partial-inline-act.ll
+++ b/llvm/test/Transforms/Inline/partial-inline-act.ll
@@ -1,20 +1,20 @@
 ; RUN: opt < %s -passes=partial-inliner -skip-partial-inlining-cost-analysis -disable-output
 ; This testcase tests the assumption cache
 
-define internal i32 @inlinedFunc(i1 %cond, i32* align 4 %align.val) {
+define internal i32 @inlinedFunc(i1 %cond, ptr align 4 %align.val) {
 entry:
   br i1 %cond, label %if.then, label %return
 if.then:
   ; Dummy store to have more than 0 uses
-  store i32 10, i32* %align.val, align 4
+  store i32 10, ptr %align.val, align 4
   br label %return
 return:             ; preds = %entry
   ret i32 0
 }
 
-define internal i32 @dummyCaller(i1 %cond, i32* align 2 %align.val) {
+define internal i32 @dummyCaller(i1 %cond, ptr align 2 %align.val) {
 entry:
-  %val = call i32 @inlinedFunc(i1 %cond, i32* %align.val)
+  %val = call i32 @inlinedFunc(i1 %cond, ptr %align.val)
   ret i32 %val
 }
 

diff  --git a/llvm/test/Transforms/Inline/pr21206.ll b/llvm/test/Transforms/Inline/pr21206.ll
index b9293aa9c7313..0123f929c5189 100644
--- a/llvm/test/Transforms/Inline/pr21206.ll
+++ b/llvm/test/Transforms/Inline/pr21206.ll
@@ -15,6 +15,6 @@ define linkonce_odr void @bar() comdat($c) {
 }
 ; CHECK: define linkonce_odr void @bar() comdat($c)
 
-define void()* @zed()  {
-  ret void()* @foo
+define ptr @zed()  {
+  ret ptr @foo
 }

diff  --git a/llvm/test/Transforms/Inline/pr26698.ll b/llvm/test/Transforms/Inline/pr26698.ll
index a0bd6a5b63fd1..265491fbe45be 100644
--- a/llvm/test/Transforms/Inline/pr26698.ll
+++ b/llvm/test/Transforms/Inline/pr26698.ll
@@ -5,7 +5,7 @@ target triple = "i686-pc-windows-msvc18.0.0"
 
 declare void @g(i32)
 
-define void @f() personality i32 (...)* @__CxxFrameHandler3 {
+define void @f() personality ptr @__CxxFrameHandler3 {
 entry:
   invoke void @g(i32 0)
           to label %invoke.cont unwind label %cs.bb
@@ -17,7 +17,7 @@ cs.bb:
   %cs = catchswitch within none [label %cp.bb] unwind label %cleanup.bb
 
 cp.bb:
-  %cpouter1 = catchpad within %cs [i8* null, i32 0, i8* null]
+  %cpouter1 = catchpad within %cs [ptr null, i32 0, ptr null]
   call void @dtor() #1 [ "funclet"(token %cpouter1) ]
   catchret from %cpouter1 to label %invoke.cont
 
@@ -30,7 +30,7 @@ cleanup.bb:
 declare i32 @__CxxFrameHandler3(...)
 
 ; Function Attrs: nounwind
-define internal void @dtor() #1 personality i32 (...)* @__CxxFrameHandler3 {
+define internal void @dtor() #1 personality ptr @__CxxFrameHandler3 {
 entry:
   invoke void @g(i32 2)
           to label %invoke.cont unwind label %ehcleanup1

diff  --git a/llvm/test/Transforms/Inline/pr48209.ll b/llvm/test/Transforms/Inline/pr48209.ll
index 0b3f22c61c2ed..29813d18458c6 100644
--- a/llvm/test/Transforms/Inline/pr48209.ll
+++ b/llvm/test/Transforms/Inline/pr48209.ll
@@ -1,22 +1,22 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -passes=inline -S < %s | FileCheck %s
 
-declare void @external_function(i8*)
+declare void @external_function(ptr)
 
-define internal void @inlined_function(i8* %arg) {
-  call void @external_function(i8* %arg)
+define internal void @inlined_function(ptr %arg) {
+  call void @external_function(ptr %arg)
   ret void
 }
 
 ; TODO: This is a miscompile.
-define void @test(i8** %p) {
+define void @test(ptr %p) {
 ; CHECK-LABEL: @test(
-; CHECK-NEXT:    [[ARG:%.*]] = load i8*, i8** [[P:%.*]], align 8, !alias.scope !0
-; CHECK-NEXT:    call void @external_function(i8* [[ARG]]), !noalias !0
+; CHECK-NEXT:    [[ARG:%.*]] = load ptr, ptr [[P:%.*]], align 8, !alias.scope !0
+; CHECK-NEXT:    call void @external_function(ptr [[ARG]]), !noalias !0
 ; CHECK-NEXT:    ret void
 ;
-  %arg = load i8*, i8** %p, !alias.scope !0
-  tail call void @inlined_function(i8* %arg), !noalias !0
+  %arg = load ptr, ptr %p, !alias.scope !0
+  tail call void @inlined_function(ptr %arg), !noalias !0
   ret void
 }
 

diff  --git a/llvm/test/Transforms/Inline/pr50270.ll b/llvm/test/Transforms/Inline/pr50270.ll
index ff06f78ca17c5..98324087b7b08 100644
--- a/llvm/test/Transforms/Inline/pr50270.ll
+++ b/llvm/test/Transforms/Inline/pr50270.ll
@@ -6,62 +6,62 @@
 ; the caller. We should not be assigning incorrect noalias metadata in
 ; that case.
 
-declare { i64* } @opaque_callee()
+declare { ptr } @opaque_callee()
 
-define { i64* } @callee(i64* %x) {
+define { ptr } @callee(ptr %x) {
 ; CHECK-LABEL: @callee(
-; CHECK-NEXT:    [[RES:%.*]] = insertvalue { i64* } undef, i64* [[X:%.*]], 0
-; CHECK-NEXT:    ret { i64* } [[RES]]
+; CHECK-NEXT:    [[RES:%.*]] = insertvalue { ptr } undef, ptr [[X:%.*]], 0
+; CHECK-NEXT:    ret { ptr } [[RES]]
 ;
-  %res = insertvalue { i64* } undef, i64* %x, 0
-  ret { i64* } %res
+  %res = insertvalue { ptr } undef, ptr %x, 0
+  ret { ptr } %res
 }
 
 ; @opaque_callee() should not receive noalias metadata here.
 define void @caller() {
 ; CHECK-LABEL: @caller(
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !0)
-; CHECK-NEXT:    [[S:%.*]] = call { i64* } @opaque_callee()
-; CHECK-NEXT:    [[X:%.*]] = extractvalue { i64* } [[S]], 0
+; CHECK-NEXT:    [[S:%.*]] = call { ptr } @opaque_callee()
+; CHECK-NEXT:    [[X:%.*]] = extractvalue { ptr } [[S]], 0
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.experimental.noalias.scope.decl(metadata !0)
-  %s = call { i64* } @opaque_callee()
-  %x = extractvalue { i64* } %s, 0
-  call { i64* } @callee(i64* %x), !noalias !0
+  %s = call { ptr } @opaque_callee()
+  %x = extractvalue { ptr } %s, 0
+  call { ptr } @callee(ptr %x), !noalias !0
   ret void
 }
 
 ; @opaque_callee() should have the same noalias metadata as the load from the
 ; else branch, not as the load in the if branch.
-define { i64* } @self_caller(i1 %c, i64* %a) {
+define { ptr } @self_caller(i1 %c, ptr %a) {
 ; CHECK-LABEL: @self_caller(
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !0)
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF:%.*]], label [[ELSE:%.*]]
 ; CHECK:       if:
-; CHECK-NEXT:    [[S:%.*]] = call { i64* } @opaque_callee(), !noalias !0
-; CHECK-NEXT:    [[X:%.*]] = extractvalue { i64* } [[S]], 0
+; CHECK-NEXT:    [[S:%.*]] = call { ptr } @opaque_callee(), !noalias !0
+; CHECK-NEXT:    [[X:%.*]] = extractvalue { ptr } [[S]], 0
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !3)
-; CHECK-NEXT:    [[TMP1:%.*]] = load volatile i64, i64* [[X]], align 4, !alias.scope !3
-; CHECK-NEXT:    ret { i64* } [[S]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load volatile i64, ptr [[X]], align 4, !alias.scope !3
+; CHECK-NEXT:    ret { ptr } [[S]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[R2:%.*]] = insertvalue { i64* } undef, i64* [[A:%.*]], 0
-; CHECK-NEXT:    [[TMP2:%.*]] = load volatile i64, i64* [[A]], align 4, !alias.scope !0
-; CHECK-NEXT:    ret { i64* } [[R2]]
+; CHECK-NEXT:    [[R2:%.*]] = insertvalue { ptr } undef, ptr [[A:%.*]], 0
+; CHECK-NEXT:    [[TMP2:%.*]] = load volatile i64, ptr [[A]], align 4, !alias.scope !0
+; CHECK-NEXT:    ret { ptr } [[R2]]
 ;
   call void @llvm.experimental.noalias.scope.decl(metadata !0)
   br i1 %c, label %if, label %else
 
 if:
-  %s = call { i64* } @opaque_callee(), !noalias !0
-  %x = extractvalue { i64* } %s, 0
-  %r = call { i64* } @self_caller(i1 false, i64* %x)
-  ret { i64* } %r
+  %s = call { ptr } @opaque_callee(), !noalias !0
+  %x = extractvalue { ptr } %s, 0
+  %r = call { ptr } @self_caller(i1 false, ptr %x)
+  ret { ptr } %r
 
 else:
-  %r2 = insertvalue { i64* } undef, i64* %a, 0
-  load volatile i64, i64* %a, !alias.scope !0
-  ret { i64* } %r2
+  %r2 = insertvalue { ptr } undef, ptr %a, 0
+  load volatile i64, ptr %a, !alias.scope !0
+  ret { ptr } %r2
 }
 
 declare void @llvm.experimental.noalias.scope.decl(metadata)

diff  --git a/llvm/test/Transforms/Inline/pr50589.ll b/llvm/test/Transforms/Inline/pr50589.ll
index 8dcd1c0efbcbc..7be163fcd3527 100644
--- a/llvm/test/Transforms/Inline/pr50589.ll
+++ b/llvm/test/Transforms/Inline/pr50589.ll
@@ -4,53 +4,53 @@
 ; Test interaction of simplification during cloning with insertion of scoped
 ; noalias metadata.
 
-define <2 x i8> @callee1(<2 x i8>* %ptr1, <2 x i8>* noalias %ptr2, <2 x i1> %mask, <2 x i8> %passthru) {
+define <2 x i8> @callee1(ptr %ptr1, ptr noalias %ptr2, <2 x i1> %mask, <2 x i8> %passthru) {
 ; CHECK-LABEL: @callee1(
-; CHECK-NEXT:    [[RET:%.*]] = call <2 x i8> @llvm.masked.load.v2i8.p0v2i8(<2 x i8>* [[PTR1:%.*]], i32 1, <2 x i1> [[MASK:%.*]], <2 x i8> [[PASSTHRU:%.*]])
-; CHECK-NEXT:    store <2 x i8> zeroinitializer, <2 x i8>* [[PTR2:%.*]], align 2
+; CHECK-NEXT:    [[RET:%.*]] = call <2 x i8> @llvm.masked.load.v2i8.p0(ptr [[PTR1:%.*]], i32 1, <2 x i1> [[MASK:%.*]], <2 x i8> [[PASSTHRU:%.*]])
+; CHECK-NEXT:    store <2 x i8> zeroinitializer, ptr [[PTR2:%.*]], align 2
 ; CHECK-NEXT:    ret <2 x i8> [[RET]]
 ;
-  %ret = call <2 x i8> @llvm.masked.load.v2i8(<2 x i8>* %ptr1, i32 1, <2 x i1> %mask, <2 x i8> %passthru)
-  store <2 x i8> zeroinitializer, <2 x i8>* %ptr2
+  %ret = call <2 x i8> @llvm.masked.load.v2i8(ptr %ptr1, i32 1, <2 x i1> %mask, <2 x i8> %passthru)
+  store <2 x i8> zeroinitializer, ptr %ptr2
   ret <2 x i8> %ret
 }
 
 ; The load should not have !noalias.
-define void @caller1(<2 x i8>* %ptr1, <2 x i8>* %ptr2) {
+define void @caller1(ptr %ptr1, ptr %ptr2) {
 ; CHECK-LABEL: @caller1(
-; CHECK-NEXT:    [[PASSTHRU:%.*]] = load <2 x i8>, <2 x i8>* [[PTR2:%.*]], align 2{{$}}
+; CHECK-NEXT:    [[PASSTHRU:%.*]] = load <2 x i8>, ptr [[PTR2:%.*]], align 2{{$}}
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]])
-; CHECK-NEXT:    store <2 x i8> zeroinitializer, <2 x i8>* [[PTR2]], align 2, !alias.scope !0
+; CHECK-NEXT:    store <2 x i8> zeroinitializer, ptr [[PTR2]], align 2, !alias.scope !0
 ; CHECK-NEXT:    ret void
 ;
-  %passthru = load <2 x i8>, <2 x i8>* %ptr2
-  call <2 x i8> @callee1(<2 x i8>* %ptr1, <2 x i8>* %ptr2, <2 x i1> zeroinitializer, <2 x i8> %passthru)
+  %passthru = load <2 x i8>, ptr %ptr2
+  call <2 x i8> @callee1(ptr %ptr1, ptr %ptr2, <2 x i1> zeroinitializer, <2 x i8> %passthru)
   ret void
 }
 
-define <2 x i8> @callee2(<2 x i8>* %ptr1, <2 x i8>* noalias %ptr2, <2 x i1> %mask) {
+define <2 x i8> @callee2(ptr %ptr1, ptr noalias %ptr2, <2 x i1> %mask) {
 ; CHECK-LABEL: @callee2(
-; CHECK-NEXT:    [[PASSTHRU:%.*]] = load <2 x i8>, <2 x i8>* [[PTR2:%.*]], align 2
-; CHECK-NEXT:    [[RET:%.*]] = call <2 x i8> @llvm.masked.load.v2i8.p0v2i8(<2 x i8>* [[PTR1:%.*]], i32 1, <2 x i1> [[MASK:%.*]], <2 x i8> [[PASSTHRU]])
-; CHECK-NEXT:    store <2 x i8> zeroinitializer, <2 x i8>* [[PTR2]], align 2
+; CHECK-NEXT:    [[PASSTHRU:%.*]] = load <2 x i8>, ptr [[PTR2:%.*]], align 2
+; CHECK-NEXT:    [[RET:%.*]] = call <2 x i8> @llvm.masked.load.v2i8.p0(ptr [[PTR1:%.*]], i32 1, <2 x i1> [[MASK:%.*]], <2 x i8> [[PASSTHRU]])
+; CHECK-NEXT:    store <2 x i8> zeroinitializer, ptr [[PTR2]], align 2
 ; CHECK-NEXT:    ret <2 x i8> [[RET]]
 ;
-  %passthru = load <2 x i8>, <2 x i8>* %ptr2
-  %ret = call <2 x i8> @llvm.masked.load.v2i8(<2 x i8>* %ptr1, i32 1, <2 x i1> %mask, <2 x i8> %passthru)
-  store <2 x i8> zeroinitializer, <2 x i8>* %ptr2
+  %passthru = load <2 x i8>, ptr %ptr2
+  %ret = call <2 x i8> @llvm.masked.load.v2i8(ptr %ptr1, i32 1, <2 x i1> %mask, <2 x i8> %passthru)
+  store <2 x i8> zeroinitializer, ptr %ptr2
   ret <2 x i8> %ret
 }
 
 ; The load should not have !noalias.
-define void @caller2(<2 x i8>* %ptr1, <2 x i8>* %ptr2) {
+define void @caller2(ptr %ptr1, ptr %ptr2) {
 ; CHECK-LABEL: @caller2(
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
-; CHECK-NEXT:    [[PASSTHRU_I:%.*]] = load <2 x i8>, <2 x i8>* [[PTR2:%.*]], align 2, !alias.scope !3{{$}}
-; CHECK-NEXT:    store <2 x i8> zeroinitializer, <2 x i8>* [[PTR2]], align 2, !alias.scope !3
+; CHECK-NEXT:    [[PASSTHRU_I:%.*]] = load <2 x i8>, ptr [[PTR2:%.*]], align 2, !alias.scope !3{{$}}
+; CHECK-NEXT:    store <2 x i8> zeroinitializer, ptr [[PTR2]], align 2, !alias.scope !3
 ; CHECK-NEXT:    ret void
 ;
-  call <2 x i8> @callee2(<2 x i8>* %ptr1, <2 x i8>* %ptr2, <2 x i1> zeroinitializer)
+  call <2 x i8> @callee2(ptr %ptr1, ptr %ptr2, <2 x i1> zeroinitializer)
   ret void
 }
 
-declare <2 x i8> @llvm.masked.load.v2i8(<2 x i8>*, i32, <2 x i1>, <2 x i8>)
+declare <2 x i8> @llvm.masked.load.v2i8(ptr, i32, <2 x i1>, <2 x i8>)

diff  --git a/llvm/test/Transforms/Inline/pr53206.ll b/llvm/test/Transforms/Inline/pr53206.ll
index e8d133c11c13f..e7b2b191ed6ff 100644
--- a/llvm/test/Transforms/Inline/pr53206.ll
+++ b/llvm/test/Transforms/Inline/pr53206.ll
@@ -4,7 +4,7 @@
 ; Check that the exception handling code is fully pruned, and does not
 ; leave behind invalid IR.
 
-define internal void @foo() personality i32 (...)* undef {
+define internal void @foo() personality ptr undef {
 entry:
   br i1 false, label %join, label %split
 
@@ -42,7 +42,7 @@ exit:
   ret void
 }
 
-define void @test() personality i32 (...)* undef {
+define void @test() personality ptr undef {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:    ret void
 ;

diff  --git a/llvm/test/Transforms/Inline/prof-update-instr.ll b/llvm/test/Transforms/Inline/prof-update-instr.ll
index 6650165cb9045..38dfa67dcacdc 100644
--- a/llvm/test/Transforms/Inline/prof-update-instr.ll
+++ b/llvm/test/Transforms/Inline/prof-update-instr.ll
@@ -2,8 +2,8 @@
 ; Checks if inliner updates VP metadata for indrect call instructions
 ; with instrumentation based profile.
 
-@func = global void ()* null
-@func2 = global void ()* null
+@func = global ptr null
+@func2 = global ptr null
 
 ; CHECK: define void @callee(i32 %n) !prof ![[ENTRY_COUNT:[0-9]*]]
 define void  @callee(i32 %n) !prof !15 {
@@ -11,12 +11,12 @@ define void  @callee(i32 %n) !prof !15 {
   br i1 %cond, label %cond_true, label %cond_false, !prof !20
 cond_true:
 ; f2 is optimized away, thus not updated.
-  %f2 = load void ()*, void ()** @func2
+  %f2 = load ptr, ptr @func2
 ; CHECK: call void %f2(), !prof ![[COUNT_IND_CALLEE1:[0-9]*]]
   call void %f2(), !prof !19
   ret void
 cond_false:
-  %f = load void ()*, void ()** @func
+  %f = load ptr, ptr @func
 ; CHECK: call void %f(), !prof ![[COUNT_IND_CALLEE:[0-9]*]]
   call void %f(), !prof !18
   ret void

diff  --git a/llvm/test/Transforms/Inline/prof-update-sample-alwaysinline.ll b/llvm/test/Transforms/Inline/prof-update-sample-alwaysinline.ll
index 45d62c4c3c3cf..d6b771e2629d2 100644
--- a/llvm/test/Transforms/Inline/prof-update-sample-alwaysinline.ll
+++ b/llvm/test/Transforms/Inline/prof-update-sample-alwaysinline.ll
@@ -3,7 +3,7 @@
 
 declare void @ext();
 declare void @ext1();
-@func = global void ()* null
+@func = global ptr null
 
 ; CHECK: define void @callee(i32 %n) #0 !prof ![[ENTRY_COUNT:[0-9]*]]
 define void  @callee(i32 %n) #0 !prof !15 {
@@ -18,7 +18,7 @@ cond_false:
 ; ext is cloned and updated.
 ; CHECK: call void @ext(), !prof ![[COUNT_CALLEE:[0-9]*]]
   call void @ext(), !prof !16
-  %f = load void ()*, void ()** @func
+  %f = load ptr, ptr @func
 ; CHECK: call void %f(), !prof ![[COUNT_IND_CALLEE:[0-9]*]] 
   call void %f(), !prof !18
   ret void

diff  --git a/llvm/test/Transforms/Inline/prof-update-sample.ll b/llvm/test/Transforms/Inline/prof-update-sample.ll
index 24db6a1aa986d..6cdd70e84e0c6 100644
--- a/llvm/test/Transforms/Inline/prof-update-sample.ll
+++ b/llvm/test/Transforms/Inline/prof-update-sample.ll
@@ -3,7 +3,7 @@
 
 declare void @ext();
 declare void @ext1();
-@func = global void ()* null
+@func = global ptr null
 
 ; CHECK: define void @callee(i32 %n) !prof ![[ENTRY_COUNT:[0-9]*]]
 define void  @callee(i32 %n) !prof !15 {
@@ -18,7 +18,7 @@ cond_false:
 ; ext is cloned and updated.
 ; CHECK: call void @ext(), !prof ![[COUNT_CALLEE:[0-9]*]]
   call void @ext(), !prof !16
-  %f = load void ()*, void ()** @func
+  %f = load ptr, ptr @func
 ; CHECK: call void %f(), !prof ![[COUNT_IND_CALLEE:[0-9]*]] 
   call void %f(), !prof !18
   ret void

diff  --git a/llvm/test/Transforms/Inline/profile_meta_invoke.ll b/llvm/test/Transforms/Inline/profile_meta_invoke.ll
index b89868b945618..ff932af07ed99 100644
--- a/llvm/test/Transforms/Inline/profile_meta_invoke.ll
+++ b/llvm/test/Transforms/Inline/profile_meta_invoke.ll
@@ -3,20 +3,20 @@
 
 declare i32 @__gxx_personality_v0(...)
 
-define void @callee(void ()* %func) !prof !15 {
+define void @callee(ptr %func) !prof !15 {
   call void %func(), !prof !16
   ret void
 }
 
-define void @caller(void ()* %func) personality i32 (...)* @__gxx_personality_v0 {
-  invoke void @callee(void ()* %func)
+define void @caller(ptr %func) personality ptr @__gxx_personality_v0 {
+  invoke void @callee(ptr %func)
           to label %ret unwind label %lpad, !prof !17
 
 ret:
   ret void
 
 lpad:
-  %exn = landingpad {i8*, i32}
+  %exn = landingpad {ptr, i32}
           cleanup
   unreachable
 }

diff  --git a/llvm/test/Transforms/Inline/ptr-diff.ll b/llvm/test/Transforms/Inline/ptr-diff.ll
index 8dede3228e007..1166b09889c5e 100644
--- a/llvm/test/Transforms/Inline/ptr-diff.ll
+++ b/llvm/test/Transforms/Inline/ptr-diff.ll
@@ -8,16 +8,15 @@ define i32 @outer1() {
 ; CHECK: ret i32
 
   %ptr = alloca i32
-  %ptr1 = getelementptr inbounds i32, i32* %ptr, i32 0
-  %ptr2 = getelementptr inbounds i32, i32* %ptr, i32 42
-  %result = call i32 @inner1(i32* %ptr1, i32* %ptr2)
+  %ptr2 = getelementptr inbounds i32, ptr %ptr, i32 42
+  %result = call i32 @inner1(ptr %ptr, ptr %ptr2)
   ret i32 %result
 }
 
-define i32 @inner1(i32* %begin, i32* %end) {
+define i32 @inner1(ptr %begin, ptr %end) {
   call void @extern()
-  %begin.i = ptrtoint i32* %begin to i32
-  %end.i = ptrtoint i32* %end to i32
+  %begin.i = ptrtoint ptr %begin to i32
+  %end.i = ptrtoint ptr %end to i32
   %distance = sub i32 %end.i, %begin.i
   %icmp = icmp sle i32 %distance, 42
   br i1 %icmp, label %then, label %else
@@ -26,25 +25,24 @@ then:
   ret i32 3
 
 else:
-  %t = load i32, i32* %begin
+  %t = load i32, ptr %begin
   ret i32 %t
 }
 
-define i32 @outer1_as1(i32 addrspace(1)* %ptr) {
+define i32 @outer1_as1(ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: @outer1_as1(
 ; CHECK-NOT: call
 ; CHECK: ret i32
-  %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr, i32 0
-  %ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr, i32 42
-  %result = call i32 @inner1_as1(i32 addrspace(1)* %ptr1, i32 addrspace(1)* %ptr2)
+  %ptr2 = getelementptr inbounds i32, ptr addrspace(1) %ptr, i32 42
+  %result = call i32 @inner1_as1(ptr addrspace(1) %ptr, ptr addrspace(1) %ptr2)
   ret i32 %result
 }
 
 ; Make sure that the address space's larger size makes the ptrtoints
 ; not no-ops preventing inlining
-define i32 @inner1_as1(i32 addrspace(1)* %begin, i32 addrspace(1)* %end) {
-  %begin.i = ptrtoint i32 addrspace(1)* %begin to i32
-  %end.i = ptrtoint i32 addrspace(1)* %end to i32
+define i32 @inner1_as1(ptr addrspace(1) %begin, ptr addrspace(1) %end) {
+  %begin.i = ptrtoint ptr addrspace(1) %begin to i32
+  %end.i = ptrtoint ptr addrspace(1) %end to i32
   %distance = sub i32 %end.i, %begin.i
   %icmp = icmp sle i32 %distance, 42
   br i1 %icmp, label %then, label %else
@@ -53,27 +51,26 @@ then:
   ret i32 3
 
 else:
-  %t = load i32, i32 addrspace(1)* %begin
+  %t = load i32, ptr addrspace(1) %begin
   ret i32 %t
 }
 
-define i32 @outer2(i32* %ptr) {
+define i32 @outer2(ptr %ptr) {
 ; Test that an inbounds GEP disables this -- it isn't safe in general as
 ; wrapping changes the behavior of lessthan and greaterthan comparisons.
 ; CHECK-LABEL: @outer2(
 ; CHECK: call i32 @inner2
 ; CHECK: ret i32
 
-  %ptr1 = getelementptr i32, i32* %ptr, i32 0
-  %ptr2 = getelementptr i32, i32* %ptr, i32 42
-  %result = call i32 @inner2(i32* %ptr1, i32* %ptr2)
+  %ptr2 = getelementptr i32, ptr %ptr, i32 42
+  %result = call i32 @inner2(ptr %ptr, ptr %ptr2)
   ret i32 %result
 }
 
-define i32 @inner2(i32* %begin, i32* %end) {
+define i32 @inner2(ptr %begin, ptr %end) {
   call void @extern()
-  %begin.i = ptrtoint i32* %begin to i32
-  %end.i = ptrtoint i32* %end to i32
+  %begin.i = ptrtoint ptr %begin to i32
+  %end.i = ptrtoint ptr %end to i32
   %distance = sub i32 %end.i, %begin.i
   %icmp = icmp sle i32 %distance, 42
   br i1 %icmp, label %then, label %else
@@ -82,22 +79,22 @@ then:
   ret i32 3
 
 else:
-  %t = load i32, i32* %begin
+  %t = load i32, ptr %begin
   ret i32 %t
 }
 
-define i32 @outer3(i16* addrspace(1)* %ptr) {
+define i32 @outer3(ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: @outer3(
 ; CHECK-NOT: call i32
 ; CHECK: ret i32 3
 ; CHECK-LABEL: @inner3(
-  %result = call i32 @inner3(i16* addrspace(1)* %ptr)
+  %result = call i32 @inner3(ptr addrspace(1) %ptr)
   ret i32 %result
 }
 
-define i32 @inner3(i16* addrspace(1)* %ptr) {
+define i32 @inner3(ptr addrspace(1) %ptr) {
   call void @extern()
-  %ptr.i = ptrtoint i16* addrspace(1)* %ptr to i64
+  %ptr.i = ptrtoint ptr addrspace(1) %ptr to i64
   %distance = sub i64 %ptr.i, %ptr.i
   %icmp = icmp eq i64 %distance, 0
   br i1 %icmp, label %then, label %else
@@ -114,12 +111,12 @@ else:
 ; pointer size
 define i32 @inttoptr_free_cost(i32 %a, i32 %b, i32 %c) {
   call void @extern()
-  %p1 = inttoptr i32 %a to i32 addrspace(1)*
-  %p2 = inttoptr i32 %b to i32 addrspace(1)*
-  %p3 = inttoptr i32 %c to i32 addrspace(1)*
-  %t1 = load i32, i32 addrspace(1)* %p1
-  %t2 = load i32, i32 addrspace(1)* %p2
-  %t3 = load i32, i32 addrspace(1)* %p3
+  %p1 = inttoptr i32 %a to ptr addrspace(1)
+  %p2 = inttoptr i32 %b to ptr addrspace(1)
+  %p3 = inttoptr i32 %c to ptr addrspace(1)
+  %t1 = load i32, ptr addrspace(1) %p1
+  %t2 = load i32, ptr addrspace(1) %p2
+  %t3 = load i32, ptr addrspace(1) %p3
   %s = add i32 %t1, %t2
   %s1 = add i32 %s, %t3
   ret i32 %s1
@@ -136,12 +133,12 @@ define i32 @inttoptr_free_cost_user(i32 %begin, i32 %end) {
 ; pointer size
 define i32 @inttoptr_cost_smaller_ptr(i32 %a, i32 %b, i32 %c) {
   call void @extern()
-  %p1 = inttoptr i32 %a to i32 addrspace(2)*
-  %p2 = inttoptr i32 %b to i32 addrspace(2)*
-  %p3 = inttoptr i32 %c to i32 addrspace(2)*
-  %t1 = load i32, i32 addrspace(2)* %p1
-  %t2 = load i32, i32 addrspace(2)* %p2
-  %t3 = load i32, i32 addrspace(2)* %p3
+  %p1 = inttoptr i32 %a to ptr addrspace(2)
+  %p2 = inttoptr i32 %b to ptr addrspace(2)
+  %p3 = inttoptr i32 %c to ptr addrspace(2)
+  %t1 = load i32, ptr addrspace(2) %p1
+  %t2 = load i32, ptr addrspace(2) %p2
+  %t3 = load i32, ptr addrspace(2) %p3
   %s = add i32 %t1, %t2
   %s1 = add i32 %s, %t3
   ret i32 %s1

diff  --git a/llvm/test/Transforms/Inline/recursive.ll b/llvm/test/Transforms/Inline/recursive.ll
index 9a79932cde1fd..d63076dc7ff30 100644
--- a/llvm/test/Transforms/Inline/recursive.ll
+++ b/llvm/test/Transforms/Inline/recursive.ll
@@ -8,8 +8,7 @@ define i32 @large_stack_callee(i32 %param) {
 ; CHECK-LABEL: define i32 @large_stack_callee(
 entry:
  %yyy = alloca [100000 x i8]
- %r = bitcast [100000 x i8]* %yyy to i8*
- call void @bar(i8* %r)
+ call void @bar(ptr %yyy)
  ret i32 4
 }
 
@@ -36,13 +35,13 @@ exit:
   ret i32 4
 }
 
-declare void @bar(i8* %in)
+declare void @bar(ptr %in)
 
 declare i32 @foo(i32 %param)
 
 ; Check that when inlining a non-recursive path into a function's own body that
 ; we get the re-mapping of instructions correct.
-define i32 @test_recursive_inlining_remapping(i1 %init, i8* %addr) {
+define i32 @test_recursive_inlining_remapping(i1 %init, ptr %addr) {
 ; CHECK-LABEL: define i32 @test_recursive_inlining_remapping(
 bb:
   %n = alloca i32
@@ -53,21 +52,18 @@ bb:
 ; CHECK-NEXT:    br i1 %init,
 
 store:
-  store i32 0, i32* %n
-  %cast = bitcast i32* %n to i8*
-  %v = call i32 @test_recursive_inlining_remapping(i1 false, i8* %cast)
+  store i32 0, ptr %n
+  %v = call i32 @test_recursive_inlining_remapping(i1 false, ptr %n)
   ret i32 %v
 ; CHECK-NOT:     call
 ;
-; CHECK:         store i32 0, i32* %[[N]]
-; CHECK-NEXT:    %[[CAST:.*]] = bitcast i32* %[[N]] to i8*
-; CHECK-NEXT:    %[[INLINED_LOAD:.*]] = load i32, i32* %[[N]]
+; CHECK:         store i32 0, ptr %[[N]]
+; CHECK-NEXT:    %[[INLINED_LOAD:.*]] = load i32, ptr %[[N]]
 ; CHECK-NEXT:    ret i32 %[[INLINED_LOAD]]
 ;
 ; CHECK-NOT:     call
 
 load:
-  %castback = bitcast i8* %addr to i32*
-  %n.load = load i32, i32* %castback
+  %n.load = load i32, ptr %addr
   ret i32 %n.load
 }

diff  --git a/llvm/test/Transforms/Inline/redundant-loads.ll b/llvm/test/Transforms/Inline/redundant-loads.ll
index 591e989e102a7..773be7813727b 100644
--- a/llvm/test/Transforms/Inline/redundant-loads.ll
+++ b/llvm/test/Transforms/Inline/redundant-loads.ll
@@ -5,144 +5,144 @@ target triple = "x86_64-unknown-linux-gnu"
 
 declare void @pad() readnone
 
-define void @outer1(i32* %a) {
+define void @outer1(ptr %a) {
 ; CHECK-LABEL: @outer1(
 ; CHECK-NOT: call void @inner1
   %b = alloca i32
-  call void @inner1(i32* %a, i32* %b)
+  call void @inner1(ptr %a, ptr %b)
   ret void
 }
 
-define void @inner1(i32* %a, i32* %b) {
-  %1 = load i32, i32* %a
-  store i32 %1, i32 * %b ; This store does not clobber the first load.
-  %2 = load i32, i32* %a
+define void @inner1(ptr %a, ptr %b) {
+  %1 = load i32, ptr %a
+  store i32 %1, ptr %b ; This store does not clobber the first load.
+  %2 = load i32, ptr %a
   call void @pad()
-  %3 = load i32, i32* %a
+  %3 = load i32, ptr %a
   ret void
 }
 
 
-define void @outer2(i32* %a, i32* %b) {
+define void @outer2(ptr %a, ptr %b) {
 ; CHECK-LABEL: @outer2(
 ; CHECK: call void @inner2
-  call void @inner2(i32* %a, i32* %b)
+  call void @inner2(ptr %a, ptr %b)
   ret void
 }
 
-define void @inner2(i32* %a, i32* %b) {
-  %1 = load i32, i32* %a
-  store i32 %1, i32 * %b ; This store clobbers the first load.
-  %2 = load i32, i32* %a
+define void @inner2(ptr %a, ptr %b) {
+  %1 = load i32, ptr %a
+  store i32 %1, ptr %b ; This store clobbers the first load.
+  %2 = load i32, ptr %a
   call void @pad()
   ret void
 }
 
 
-define void @outer3(i32* %a) {
+define void @outer3(ptr %a) {
 ; CHECK-LABEL: @outer3(
 ; CHECK: call void @inner3
-  call void @inner3(i32* %a)
+  call void @inner3(ptr %a)
   ret void
 }
 
 declare void @ext()
 
-define void @inner3(i32* %a) {
-  %1 = load i32, i32* %a
+define void @inner3(ptr %a) {
+  %1 = load i32, ptr %a
   call void @ext() ; This call clobbers the first load.
-  %2 = load i32, i32* %a
+  %2 = load i32, ptr %a
   ret void
 }
 
 
-define void @outer4(i32* %a, i32* %b, i32* %c) {
+define void @outer4(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: @outer4(
 ; CHECK-NOT: call void @inner4
-  call void @inner4(i32* %a, i32* %b, i1 false)
+  call void @inner4(ptr %a, ptr %b, i1 false)
   ret void
 }
 
-define void @inner4(i32* %a, i32* %b, i1 %pred) {
-  %1 = load i32, i32* %a
+define void @inner4(ptr %a, ptr %b, i1 %pred) {
+  %1 = load i32, ptr %a
   br i1 %pred, label %cond_true, label %cond_false
 
 cond_true:
-  store i32 %1, i32 * %b ; This store does not clobber the first load.
+  store i32 %1, ptr %b ; This store does not clobber the first load.
   br label %cond_false
 
 cond_false:
-  %2 = load i32, i32* %a
+  %2 = load i32, ptr %a
   call void @pad()
-  %3 = load i32, i32* %a
-  %4 = load i32, i32* %a
+  %3 = load i32, ptr %a
+  %4 = load i32, ptr %a
   ret void
 }
 
 
-define void @outer5(i32* %a, double %b) {
+define void @outer5(ptr %a, double %b) {
 ; CHECK-LABEL: @outer5(
 ; CHECK-NOT: call void @inner5
-  call void @inner5(i32* %a, double %b)
+  call void @inner5(ptr %a, double %b)
   ret void
 }
 
 declare double @llvm.fabs.f64(double) nounwind readnone
 
-define void @inner5(i32* %a, double %b) {
-  %1 = load i32, i32* %a
+define void @inner5(ptr %a, double %b) {
+  %1 = load i32, ptr %a
   %2 = call double @llvm.fabs.f64(double %b) ; This intrinsic does not clobber the first load.
-  %3 = load i32, i32* %a
+  %3 = load i32, ptr %a
   call void @pad()
   ret void
 }
 
-define void @outer6(i32* %a, i8* %ptr) {
+define void @outer6(ptr %a, ptr %ptr) {
 ; CHECK-LABEL: @outer6(
 ; CHECK-NOT: call void @inner6
-  call void @inner6(i32* %a, i8* %ptr)
+  call void @inner6(ptr %a, ptr %ptr)
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) argmemonly nounwind
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) argmemonly nounwind
 
-define void @inner6(i32* %a, i8* %ptr) {
-  %1 = load i32, i32* %a
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* %ptr) ; This intrinsic does not clobber the first load.
-  %2 = load i32, i32* %a
+define void @inner6(ptr %a, ptr %ptr) {
+  %1 = load i32, ptr %a
+  call void @llvm.lifetime.start.p0(i64 32, ptr %ptr) ; This intrinsic does not clobber the first load.
+  %2 = load i32, ptr %a
   call void @pad()
-  %3 = load i32, i32* %a
+  %3 = load i32, ptr %a
   ret void
 }
 
-define void @outer7(i32* %a) {
+define void @outer7(ptr %a) {
 ; CHECK-LABEL: @outer7(
 ; CHECK-NOT: call void @inner7
-  call void @inner7(i32* %a)
+  call void @inner7(ptr %a)
   ret void
 }
 
 declare void @ext2() readnone
 
-define void @inner7(i32* %a) {
-  %1 = load i32, i32* %a
+define void @inner7(ptr %a) {
+  %1 = load i32, ptr %a
   call void @ext2() ; This call does not clobber the first load.
-  %2 = load i32, i32* %a
+  %2 = load i32, ptr %a
   ret void
 }
 
 
-define void @outer8(i32* %a) {
+define void @outer8(ptr %a) {
 ; CHECK-LABEL: @outer8(
 ; CHECK-NOT: call void @inner8
-  call void @inner8(i32* %a, void ()* @ext2)
+  call void @inner8(ptr %a, ptr @ext2)
   ret void
 }
 
-define void @inner8(i32* %a, void ()* %f) {
-  %1 = load i32, i32* %a
+define void @inner8(ptr %a, ptr %f) {
+  %1 = load i32, ptr %a
   call void %f() ; This indirect call does not clobber the first load.
-  %2 = load i32, i32* %a
+  %2 = load i32, ptr %a
   call void @pad()
   call void @pad()
   call void @pad()
@@ -159,17 +159,17 @@ define void @inner8(i32* %a, void ()* %f) {
 }
 
 
-define void @outer9(i32* %a) {
+define void @outer9(ptr %a) {
 ; CHECK-LABEL: @outer9(
 ; CHECK: call void @inner9
-  call void @inner9(i32* %a, void ()* @ext)
+  call void @inner9(ptr %a, ptr @ext)
   ret void
 }
 
-define void @inner9(i32* %a, void ()* %f) {
-  %1 = load i32, i32* %a
+define void @inner9(ptr %a, ptr %f) {
+  %1 = load i32, ptr %a
   call void %f() ; This indirect call clobbers the first load.
-  %2 = load i32, i32* %a
+  %2 = load i32, ptr %a
   call void @pad()
   call void @pad()
   call void @pad()
@@ -186,19 +186,19 @@ define void @inner9(i32* %a, void ()* %f) {
 }
 
 
-define void @outer10(i32* %a) {
+define void @outer10(ptr %a) {
 ; CHECK-LABEL: @outer10(
 ; CHECK: call void @inner10
   %b = alloca i32
-  call void @inner10(i32* %a, i32* %b)
+  call void @inner10(ptr %a, ptr %b)
   ret void
 }
 
-define void @inner10(i32* %a, i32* %b) {
-  %1 = load i32, i32* %a
-  store i32 %1, i32 * %b
-  %2 = load volatile i32, i32* %a ; volatile load should be kept.
+define void @inner10(ptr %a, ptr %b) {
+  %1 = load i32, ptr %a
+  store i32 %1, ptr %b
+  %2 = load volatile i32, ptr %a ; volatile load should be kept.
   call void @pad()
-  %3 = load volatile i32, i32* %a ; Same as the above.
+  %3 = load volatile i32, ptr %a ; Same as the above.
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/ret_attr_update.ll b/llvm/test/Transforms/Inline/ret_attr_update.ll
index bfe4b048ad256..bba7147ad0046 100644
--- a/llvm/test/Transforms/Inline/ret_attr_update.ll
+++ b/llvm/test/Transforms/Inline/ret_attr_update.ll
@@ -2,206 +2,206 @@
 ; RUN: opt < %s -inline-threshold=0 -passes=always-inline -S | FileCheck %s
 ; RUN: opt < %s -passes=always-inline -S | FileCheck %s
 
-declare i8* @foo(i8*) nounwind willreturn
+declare ptr @foo(ptr) nounwind willreturn
 
-define i8* @callee(i8 *%p) alwaysinline {
+define ptr @callee(ptr %p) alwaysinline {
 ; CHECK-LABEL: @callee(
-; CHECK-NEXT:    [[R:%.*]] = call i8* @foo(i8* noalias [[P:%.*]])
-; CHECK-NEXT:    ret i8* [[R]]
+; CHECK-NEXT:    [[R:%.*]] = call ptr @foo(ptr noalias [[P:%.*]])
+; CHECK-NEXT:    ret ptr [[R]]
 ;
-  %r = call i8* @foo(i8* noalias %p)
-  ret i8* %r
+  %r = call ptr @foo(ptr noalias %p)
+  ret ptr %r
 }
 
-define i8* @caller(i8* %ptr, i64 %x) {
+define ptr @caller(ptr %ptr, i64 %x) {
 ; CHECK-LABEL: @caller(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[X:%.*]]
-; CHECK-NEXT:    [[R_I:%.*]] = call nonnull i8* @foo(i8* noalias [[GEP]])
-; CHECK-NEXT:    ret i8* [[R_I]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT:    [[R_I:%.*]] = call nonnull ptr @foo(ptr noalias [[GEP]])
+; CHECK-NEXT:    ret ptr [[R_I]]
 ;
-  %gep = getelementptr inbounds i8, i8* %ptr, i64 %x
-  %p = call nonnull i8* @callee(i8* %gep)
-  ret i8* %p
+  %gep = getelementptr inbounds i8, ptr %ptr, i64 %x
+  %p = call nonnull ptr @callee(ptr %gep)
+  ret ptr %p
 }
 
 declare void @llvm.experimental.guard(i1,...)
 ; Cannot add nonnull attribute to foo
 ; because the guard is a throwing call
-define internal i8* @callee_with_throwable(i8* %p) alwaysinline {
-  %r = call i8* @foo(i8* %p)
-  %cond = icmp ne i8* %r, null
+define internal ptr @callee_with_throwable(ptr %p) alwaysinline {
+  %r = call ptr @foo(ptr %p)
+  %cond = icmp ne ptr %r, null
   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
-  ret i8* %r
+  ret ptr %r
 }
 
-declare i8* @bar(i8*) readonly nounwind
+declare ptr @bar(ptr) readonly nounwind
 ; Here also we cannot add nonnull attribute to the call bar.
-define internal i8* @callee_with_explicit_control_flow(i8* %p) alwaysinline {
-  %r = call i8* @bar(i8* %p)
-  %cond = icmp ne i8* %r, null
+define internal ptr @callee_with_explicit_control_flow(ptr %p) alwaysinline {
+  %r = call ptr @bar(ptr %p)
+  %cond = icmp ne ptr %r, null
   br i1 %cond, label %ret, label %orig
 
 ret:
-  ret i8* %r
+  ret ptr %r
 
 orig:
-  ret i8* %p
+  ret ptr %p
 }
 
-define i8* @caller2(i8* %ptr, i64 %x, i1 %cond) {
+define ptr @caller2(ptr %ptr, i64 %x, i1 %cond) {
 ; CHECK-LABEL: @caller2(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[X:%.*]]
-; CHECK-NEXT:    [[R_I:%.*]] = call i8* @foo(i8* [[GEP]])
-; CHECK-NEXT:    [[COND_I:%.*]] = icmp ne i8* [[R_I]], null
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT:    [[R_I:%.*]] = call ptr @foo(ptr [[GEP]])
+; CHECK-NEXT:    [[COND_I:%.*]] = icmp ne ptr [[R_I]], null
 ; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND_I]]) [ "deopt"() ]
-; CHECK-NEXT:    [[R_I1:%.*]] = call i8* @bar(i8* [[GEP]])
-; CHECK-NEXT:    [[COND_I2:%.*]] = icmp ne i8* [[R_I1]], null
+; CHECK-NEXT:    [[R_I1:%.*]] = call ptr @bar(ptr [[GEP]])
+; CHECK-NEXT:    [[COND_I2:%.*]] = icmp ne ptr [[R_I1]], null
 ; CHECK-NEXT:    br i1 [[COND_I2]], label [[RET_I:%.*]], label [[ORIG_I:%.*]]
 ; CHECK:       ret.i:
 ; CHECK-NEXT:    br label [[CALLEE_WITH_EXPLICIT_CONTROL_FLOW_EXIT:%.*]]
 ; CHECK:       orig.i:
 ; CHECK-NEXT:    br label [[CALLEE_WITH_EXPLICIT_CONTROL_FLOW_EXIT]]
 ; CHECK:       callee_with_explicit_control_flow.exit:
-; CHECK-NEXT:    [[Q3:%.*]] = phi i8* [ [[R_I1]], [[RET_I]] ], [ [[GEP]], [[ORIG_I]] ]
+; CHECK-NEXT:    [[Q3:%.*]] = phi ptr [ [[R_I1]], [[RET_I]] ], [ [[GEP]], [[ORIG_I]] ]
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[PRET:%.*]], label [[QRET:%.*]]
 ; CHECK:       pret:
-; CHECK-NEXT:    ret i8* [[R_I]]
+; CHECK-NEXT:    ret ptr [[R_I]]
 ; CHECK:       qret:
-; CHECK-NEXT:    ret i8* [[Q3]]
+; CHECK-NEXT:    ret ptr [[Q3]]
 ;
-  %gep = getelementptr inbounds i8, i8* %ptr, i64 %x
-  %p = call nonnull i8* @callee_with_throwable(i8* %gep)
-  %q = call nonnull i8* @callee_with_explicit_control_flow(i8* %gep)
+  %gep = getelementptr inbounds i8, ptr %ptr, i64 %x
+  %p = call nonnull ptr @callee_with_throwable(ptr %gep)
+  %q = call nonnull ptr @callee_with_explicit_control_flow(ptr %gep)
   br i1 %cond, label %pret, label %qret
 
 pret:
-  ret i8* %p
+  ret ptr %p
 
 qret:
-  ret i8* %q
+  ret ptr %q
 }
 
-define internal i8* @callee3(i8 *%p) alwaysinline {
-  %r = call noalias i8* @foo(i8* %p)
-  ret i8* %r
+define internal ptr @callee3(ptr %p) alwaysinline {
+  %r = call noalias ptr @foo(ptr %p)
+  ret ptr %r
 }
 
 ; add the deref attribute to the existing attributes on foo.
-define i8* @caller3(i8* %ptr, i64 %x) {
+define ptr @caller3(ptr %ptr, i64 %x) {
 ; CHECK-LABEL: @caller3(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[X:%.*]]
-; CHECK-NEXT:    [[R_I:%.*]] = call noalias dereferenceable_or_null(12) i8* @foo(i8* [[GEP]])
-; CHECK-NEXT:    ret i8* [[R_I]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT:    [[R_I:%.*]] = call noalias dereferenceable_or_null(12) ptr @foo(ptr [[GEP]])
+; CHECK-NEXT:    ret ptr [[R_I]]
 ;
-  %gep = getelementptr inbounds i8, i8* %ptr, i64 %x
-  %p = call dereferenceable_or_null(12) i8* @callee3(i8* %gep)
-  ret i8* %p
+  %gep = getelementptr inbounds i8, ptr %ptr, i64 %x
+  %p = call dereferenceable_or_null(12) ptr @callee3(ptr %gep)
+  ret ptr %p
 }
 
-declare i8* @inf_loop_call(i8*) nounwind
+declare ptr @inf_loop_call(ptr) nounwind
 ; We cannot propagate attributes to foo because we do not know whether inf_loop_call
 ; will return execution.
-define internal i8* @callee_with_sideeffect_callsite(i8* %p) alwaysinline {
-  %r = call i8* @foo(i8* %p)
-  %v = call i8* @inf_loop_call(i8* %p)
-  ret i8* %r
+define internal ptr @callee_with_sideeffect_callsite(ptr %p) alwaysinline {
+  %r = call ptr @foo(ptr %p)
+  %v = call ptr @inf_loop_call(ptr %p)
+  ret ptr %r
 }
 
 ; do not add deref attribute to foo
-define i8* @test4(i8* %ptr, i64 %x) {
+define ptr @test4(ptr %ptr, i64 %x) {
 ; CHECK-LABEL: @test4(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[X:%.*]]
-; CHECK-NEXT:    [[R_I:%.*]] = call i8* @foo(i8* [[GEP]])
-; CHECK-NEXT:    [[V_I:%.*]] = call i8* @inf_loop_call(i8* [[GEP]])
-; CHECK-NEXT:    ret i8* [[R_I]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT:    [[R_I:%.*]] = call ptr @foo(ptr [[GEP]])
+; CHECK-NEXT:    [[V_I:%.*]] = call ptr @inf_loop_call(ptr [[GEP]])
+; CHECK-NEXT:    ret ptr [[R_I]]
 ;
-  %gep = getelementptr inbounds i8, i8* %ptr, i64 %x
-  %p = call dereferenceable_or_null(12) i8* @callee_with_sideeffect_callsite(i8* %gep)
-  ret i8* %p
+  %gep = getelementptr inbounds i8, ptr %ptr, i64 %x
+  %p = call dereferenceable_or_null(12) ptr @callee_with_sideeffect_callsite(ptr %gep)
+  ret ptr %p
 }
 
-declare i8* @baz(i8*) nounwind willreturn
-define internal i8* @callee5(i8* %p) alwaysinline {
-  %r = call i8* @foo(i8* %p)
-  %v = call i8* @baz(i8* %p)
-  ret i8* %r
+declare ptr @baz(ptr) nounwind willreturn
+define internal ptr @callee5(ptr %p) alwaysinline {
+  %r = call ptr @foo(ptr %p)
+  %v = call ptr @baz(ptr %p)
+  ret ptr %r
 }
 
 ; add the deref attribute to foo.
-define i8* @test5(i8* %ptr, i64 %x) {
+define ptr @test5(ptr %ptr, i64 %x) {
 ; CHECK-LABEL: @test5(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[X:%.*]]
-; CHECK-NEXT:    [[R_I:%.*]] = call dereferenceable_or_null(12) i8* @foo(i8* [[GEP]])
-; CHECK-NEXT:    [[V_I:%.*]] = call i8* @baz(i8* [[GEP]])
-; CHECK-NEXT:    ret i8* [[R_I]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT:    [[R_I:%.*]] = call dereferenceable_or_null(12) ptr @foo(ptr [[GEP]])
+; CHECK-NEXT:    [[V_I:%.*]] = call ptr @baz(ptr [[GEP]])
+; CHECK-NEXT:    ret ptr [[R_I]]
 ;
-  %gep = getelementptr inbounds i8, i8* %ptr, i64 %x
-  %s = call dereferenceable_or_null(12) i8* @callee5(i8* %gep)
-  ret i8* %s
+  %gep = getelementptr inbounds i8, ptr %ptr, i64 %x
+  %s = call dereferenceable_or_null(12) ptr @callee5(ptr %gep)
+  ret ptr %s
 }
 
 ; deref attributes have different values on the callee and the call feeding into
 ; the return.
 ; AttrBuilder overwrites the existing value.
-define internal i8* @callee6(i8* %p) alwaysinline {
-  %r = call dereferenceable_or_null(16) i8* @foo(i8* %p)
-  %v = call i8* @baz(i8* %p)
-  ret i8* %r
+define internal ptr @callee6(ptr %p) alwaysinline {
+  %r = call dereferenceable_or_null(16) ptr @foo(ptr %p)
+  %v = call ptr @baz(ptr %p)
+  ret ptr %r
 }
 
 
-define i8* @test6(i8* %ptr, i64 %x) {
+define ptr @test6(ptr %ptr, i64 %x) {
 ; CHECK-LABEL: @test6(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[X:%.*]]
-; CHECK-NEXT:    [[R_I:%.*]] = call dereferenceable_or_null(12) i8* @foo(i8* [[GEP]])
-; CHECK-NEXT:    [[V_I:%.*]] = call i8* @baz(i8* [[GEP]])
-; CHECK-NEXT:    ret i8* [[R_I]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT:    [[R_I:%.*]] = call dereferenceable_or_null(12) ptr @foo(ptr [[GEP]])
+; CHECK-NEXT:    [[V_I:%.*]] = call ptr @baz(ptr [[GEP]])
+; CHECK-NEXT:    ret ptr [[R_I]]
 ;
-  %gep = getelementptr inbounds i8, i8* %ptr, i64 %x
-  %s = call dereferenceable_or_null(12) i8* @callee6(i8* %gep)
-  ret i8* %s
+  %gep = getelementptr inbounds i8, ptr %ptr, i64 %x
+  %s = call dereferenceable_or_null(12) ptr @callee6(ptr %gep)
+  ret ptr %s
 }
 
 ; We add the attributes from the callee to both the calls below.
-define internal i8* @callee7(i8 *%ptr, i1 %cond) alwaysinline {
+define internal ptr @callee7(ptr %ptr, i1 %cond) alwaysinline {
   br i1 %cond, label %pass, label %fail
 
 pass:
-  %r = call i8* @foo(i8* noalias %ptr)
-  ret i8* %r
+  %r = call ptr @foo(ptr noalias %ptr)
+  ret ptr %r
 
 fail:
-  %s = call i8* @baz(i8* %ptr)
-  ret i8* %s
+  %s = call ptr @baz(ptr %ptr)
+  ret ptr %s
 }
 
-define void @test7(i8* %ptr, i64 %x, i1 %cond) {
+define void @test7(ptr %ptr, i64 %x, i1 %cond) {
 ; CHECK-LABEL: @test7(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[X:%.*]]
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[PASS_I:%.*]], label [[FAIL_I:%.*]]
 ; CHECK:       pass.i:
-; CHECK-NEXT:    [[R_I:%.*]] = call nonnull i8* @foo(i8* noalias [[GEP]])
+; CHECK-NEXT:    [[R_I:%.*]] = call nonnull ptr @foo(ptr noalias [[GEP]])
 ; CHECK-NEXT:    br label [[CALLEE7_EXIT:%.*]]
 ; CHECK:       fail.i:
-; CHECK-NEXT:    [[S_I:%.*]] = call nonnull i8* @baz(i8* [[GEP]])
+; CHECK-NEXT:    [[S_I:%.*]] = call nonnull ptr @baz(ptr [[GEP]])
 ; CHECK-NEXT:    br label [[CALLEE7_EXIT]]
 ; CHECK:       callee7.exit:
-; CHECK-NEXT:    [[T1:%.*]] = phi i8* [ [[R_I]], [[PASS_I]] ], [ [[S_I]], [[FAIL_I]] ]
-; CHECK-NEXT:    call void @snort(i8* [[T1]])
+; CHECK-NEXT:    [[T1:%.*]] = phi ptr [ [[R_I]], [[PASS_I]] ], [ [[S_I]], [[FAIL_I]] ]
+; CHECK-NEXT:    call void @snort(ptr [[T1]])
 ; CHECK-NEXT:    ret void
 ;
 
-  %gep = getelementptr inbounds i8, i8* %ptr, i64 %x
-  %t = call nonnull i8* @callee7(i8* %gep, i1 %cond)
-  call void @snort(i8* %t)
+  %gep = getelementptr inbounds i8, ptr %ptr, i64 %x
+  %t = call nonnull ptr @callee7(ptr %gep, i1 %cond)
+  call void @snort(ptr %t)
   ret void
 }
-declare void @snort(i8*)
+declare void @snort(ptr)
 
-declare i32 @intrinsic(i8*) nounwind argmemonly
+declare i32 @intrinsic(ptr) nounwind argmemonly
 
-define internal i32 @callee8(i8* %ptr) alwaysinline {
-  %r = call i32 @intrinsic(i8* noalias %ptr)
+define internal i32 @callee8(ptr %ptr) alwaysinline {
+  %r = call i32 @intrinsic(ptr noalias %ptr)
   ret i32 %r
 }
 
@@ -210,14 +210,14 @@ define internal i32 @callee8(i8* %ptr) alwaysinline {
 ; callee/callsite.
 ; We cannot propagate that attribute to another call since it can be invalid at
 ; that call.
-define i32 @test8(i8* %ptr, i64 %x) {
+define i32 @test8(ptr %ptr, i64 %x) {
 ; CHECK-LABEL: @test8(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[X:%.*]]
-; CHECK-NEXT:    [[R_I:%.*]] = call i32 @intrinsic(i8* noalias [[GEP]])
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[X:%.*]]
+; CHECK-NEXT:    [[R_I:%.*]] = call i32 @intrinsic(ptr noalias [[GEP]])
 ; CHECK-NEXT:    ret i32 [[R_I]]
 ;
 
-  %gep = getelementptr inbounds i8, i8* %ptr, i64 %x
-  %t = call signext i32 @callee8(i8* %gep)
+  %gep = getelementptr inbounds i8, ptr %ptr, i64 %x
+  %t = call signext i32 @callee8(ptr %gep)
   ret i32 %t
 }

diff  --git a/llvm/test/Transforms/Inline/store-sroa.ll b/llvm/test/Transforms/Inline/store-sroa.ll
index 6b1ca964d3304..6a63783b105be 100644
--- a/llvm/test/Transforms/Inline/store-sroa.ll
+++ b/llvm/test/Transforms/Inline/store-sroa.ll
@@ -2,12 +2,11 @@
 
 %class.A = type { i32 }
 
-define void @_Z3barP1A(%class.A* %a) #0 {
+define void @_Z3barP1A(ptr %a) #0 {
 entry:
-  %a1 = getelementptr inbounds %class.A, %class.A* %a, i64 0, i32 0
-  %0 = load i32, i32* %a1, align 4
+  %0 = load i32, ptr %a, align 4
   %add = add nsw i32 %0, 10
-  store i32 %add, i32* %a1, align 4
+  store i32 %add, ptr %a, align 4
   ret void
 }
 
@@ -17,6 +16,6 @@ define void @_Z3foov() #0 {
 ; CHECK: ret
 entry:
   %a = alloca %class.A, align 4
-  call void @_Z3barP1A(%class.A* %a)
+  call void @_Z3barP1A(ptr %a)
   ret void
 }

diff  --git a/llvm/test/Transforms/Inline/unwind-inline-asm.ll b/llvm/test/Transforms/Inline/unwind-inline-asm.ll
index cb761921dadd0..9d0016ddb6d55 100644
--- a/llvm/test/Transforms/Inline/unwind-inline-asm.ll
+++ b/llvm/test/Transforms/Inline/unwind-inline-asm.ll
@@ -10,14 +10,14 @@ entry:
   unreachable
 }
 
-define dso_local void @proxy() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local void @proxy() personality ptr @__gxx_personality_v0 {
 entry:
   call void asm sideeffect unwind "call trap", "~{dirflag},~{fpsr},~{flags}"()
   call void asm sideeffect unwind "call trap", "~{dirflag},~{fpsr},~{flags}"()
   ret void
 }
 
-define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local void @test() personality ptr @__gxx_personality_v0 {
 entry:
 ; CHECK: define dso_local void @test
 ; CHECK-NOT: invoke void @proxy()
@@ -31,16 +31,16 @@ invoke.cont:
   ret void
 
 lpad:
-; CHECK: %0 = landingpad { i8*, i32 }
-; CHECK: resume { i8*, i32 } %0
+; CHECK: %0 = landingpad { ptr, i32 }
+; CHECK: resume { ptr, i32 } %0
 
-  %0 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
-  call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0))
-  resume { i8*, i32 } %0
+  call void (ptr, ...) @printf(ptr @.str.2)
+  resume { ptr, i32 } %0
 
 }
 
 declare dso_local i32 @__gxx_personality_v0(...)
 
-declare dso_local void @printf(i8*, ...)
+declare dso_local void @printf(ptr, ...)


        


More information about the llvm-commits mailing list