[llvm] 04b944e - [InstSimplify] Convert tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 10 08:16:39 PDT 2022


Author: Nikita Popov
Date: 2022-06-10T17:16:28+02:00
New Revision: 04b944e23050e4e0c6ee983cc9bc17740315ea4f

URL: https://github.com/llvm/llvm-project/commit/04b944e23050e4e0c6ee983cc9bc17740315ea4f
DIFF: https://github.com/llvm/llvm-project/commit/04b944e23050e4e0c6ee983cc9bc17740315ea4f.diff

LOG: [InstSimplify] Convert tests to opaque pointers (NFC)

The only interesting test change is in @PR31262, where the following
fold is now performed, while it previously was not:
https://alive2.llvm.org/ce/z/a5Qmr6

llvm/test/Transforms/InstSimplify/ConstProp/gep.ll has not been
updated, because there is a tradeoff between folding and inrange
preservation there that we may want to discuss.

Updates have been performed using:
https://gist.github.com/nikic/98357b71fd67756b0f064c9517b62a34

Added: 
    

Modified: 
    llvm/test/Transforms/InstSimplify/2011-09-05-InsertExtractValue.ll
    llvm/test/Transforms/InstSimplify/2011-10-27-BinOpCrash.ll
    llvm/test/Transforms/InstSimplify/ConstProp/2005-01-28-SetCCGEP.ll
    llvm/test/Transforms/InstSimplify/ConstProp/2008-07-07-VectorCompare.ll
    llvm/test/Transforms/InstSimplify/ConstProp/2009-06-20-constexpr-zero-lhs.ll
    llvm/test/Transforms/InstSimplify/ConstProp/2009-09-01-GEP-Crash.ll
    llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cos.ll
    llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubeid.ll
    llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubema.ll
    llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubesc.ll
    llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubetc.ll
    llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fma_legacy.ll
    llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fmul_legacy.ll
    llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fract.ll
    llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/perm.ll
    llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/sin.ll
    llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/trunc.ll
    llvm/test/Transforms/InstSimplify/ConstProp/allones.ll
    llvm/test/Transforms/InstSimplify/ConstProp/basictest.ll
    llvm/test/Transforms/InstSimplify/ConstProp/bitcast.ll
    llvm/test/Transforms/InstSimplify/ConstProp/calls-math-finite.ll
    llvm/test/Transforms/InstSimplify/ConstProp/calls.ll
    llvm/test/Transforms/InstSimplify/ConstProp/cast-vector.ll
    llvm/test/Transforms/InstSimplify/ConstProp/constant-expr.ll
    llvm/test/Transforms/InstSimplify/ConstProp/div-zero.ll
    llvm/test/Transforms/InstSimplify/ConstProp/float-to-ptr-cast.ll
    llvm/test/Transforms/InstSimplify/ConstProp/freeze.ll
    llvm/test/Transforms/InstSimplify/ConstProp/gep-alias-gep-load.ll
    llvm/test/Transforms/InstSimplify/ConstProp/gep-alias.ll
    llvm/test/Transforms/InstSimplify/ConstProp/gep-constanfolding-error.ll
    llvm/test/Transforms/InstSimplify/ConstProp/gep-zeroinit-vector.ll
    llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll
    llvm/test/Transforms/InstSimplify/ConstProp/loads.ll
    llvm/test/Transforms/InstSimplify/ConstProp/poison.ll
    llvm/test/Transforms/InstSimplify/ConstProp/shift.ll
    llvm/test/Transforms/InstSimplify/ConstProp/timeout.ll
    llvm/test/Transforms/InstSimplify/ConstProp/vectorgep-crash.ll
    llvm/test/Transforms/InstSimplify/ConstProp/vscale-getelementptr.ll
    llvm/test/Transforms/InstSimplify/ConstProp/vscale-inseltpoison.ll
    llvm/test/Transforms/InstSimplify/ConstProp/vscale.ll
    llvm/test/Transforms/InstSimplify/add-mask.ll
    llvm/test/Transforms/InstSimplify/and-or-icmp-nullptr.ll
    llvm/test/Transforms/InstSimplify/and-or-icmp-zero.ll
    llvm/test/Transforms/InstSimplify/assume-non-zero.ll
    llvm/test/Transforms/InstSimplify/call.ll
    llvm/test/Transforms/InstSimplify/cast.ll
    llvm/test/Transforms/InstSimplify/cmp-alloca-offsets.ll
    llvm/test/Transforms/InstSimplify/compare.ll
    llvm/test/Transforms/InstSimplify/fold-intrinsics.ll
    llvm/test/Transforms/InstSimplify/freeze-noundef.ll
    llvm/test/Transforms/InstSimplify/freeze.ll
    llvm/test/Transforms/InstSimplify/gc_relocate.ll
    llvm/test/Transforms/InstSimplify/gep.ll
    llvm/test/Transforms/InstSimplify/icmp.ll
    llvm/test/Transforms/InstSimplify/insertelement.ll
    llvm/test/Transforms/InstSimplify/invalid-load-operand-infinite-loop.ll
    llvm/test/Transforms/InstSimplify/invariant.group-load.ll
    llvm/test/Transforms/InstSimplify/known-non-zero.ll
    llvm/test/Transforms/InstSimplify/load-relative-32.ll
    llvm/test/Transforms/InstSimplify/load-relative.ll
    llvm/test/Transforms/InstSimplify/load.ll
    llvm/test/Transforms/InstSimplify/maxmin_intrinsics.ll
    llvm/test/Transforms/InstSimplify/noalias-ptr.ll
    llvm/test/Transforms/InstSimplify/null-ptr-is-valid-attribute.ll
    llvm/test/Transforms/InstSimplify/null-ptr-is-valid.ll
    llvm/test/Transforms/InstSimplify/opaque-ptr.ll
    llvm/test/Transforms/InstSimplify/past-the-end.ll
    llvm/test/Transforms/InstSimplify/phi-cse.ll
    llvm/test/Transforms/InstSimplify/phi.ll
    llvm/test/Transforms/InstSimplify/pr33957.ll
    llvm/test/Transforms/InstSimplify/pr49495.ll
    llvm/test/Transforms/InstSimplify/ptr_diff.ll
    llvm/test/Transforms/InstSimplify/redundant-null-check-in-uadd_with_overflow-of-nonnull-ptr.ll
    llvm/test/Transforms/InstSimplify/remove-dead-call.ll
    llvm/test/Transforms/InstSimplify/require-dominator.ll
    llvm/test/Transforms/InstSimplify/result-of-usub-by-nonzero-is-non-zero-and-no-overflow.ll
    llvm/test/Transforms/InstSimplify/returned.ll
    llvm/test/Transforms/InstSimplify/select-implied.ll
    llvm/test/Transforms/InstSimplify/select-inseltpoison.ll
    llvm/test/Transforms/InstSimplify/select.ll
    llvm/test/Transforms/InstSimplify/simplify-nested-bitcast.ll
    llvm/test/Transforms/InstSimplify/vector_gep.ll
    llvm/test/Transforms/InstSimplify/vector_ptr_bitcast.ll
    llvm/test/Transforms/InstSimplify/vscale-inseltpoison.ll
    llvm/test/Transforms/InstSimplify/vscale.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/InstSimplify/2011-09-05-InsertExtractValue.ll b/llvm/test/Transforms/InstSimplify/2011-09-05-InsertExtractValue.ll
index d51dfe454f703..d9a89bf74a347 100644
--- a/llvm/test/Transforms/InstSimplify/2011-09-05-InsertExtractValue.ll
+++ b/llvm/test/Transforms/InstSimplify/2011-09-05-InsertExtractValue.ll
@@ -3,7 +3,7 @@
 
 declare void @bar()
 
-define void @test1() personality i32 (i32, i64, i8*, i8*)* @__gxx_personality_v0 {
+define void @test1() personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    invoke void @bar()
@@ -11,32 +11,32 @@ define void @test1() personality i32 (i32, i64, i8*, i8*)* @__gxx_personality_v0
 ; CHECK:       cont:
 ; CHECK-NEXT:    ret void
 ; CHECK:       lpad:
-; CHECK-NEXT:    [[EX:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT:    [[EX:%.*]] = landingpad { ptr, i32 }
 ; CHECK-NEXT:    cleanup
-; CHECK-NEXT:    resume { i8*, i32 } [[EX]]
+; CHECK-NEXT:    resume { ptr, i32 } [[EX]]
 ;
 entry:
   invoke void @bar() to label %cont unwind label %lpad
 cont:
   ret void
 lpad:
-  %ex = landingpad { i8*, i32 } cleanup
-  %exc_ptr = extractvalue { i8*, i32 } %ex, 0
-  %filter = extractvalue { i8*, i32 } %ex, 1
-  %exc_ptr2 = insertvalue { i8*, i32 } undef, i8* %exc_ptr, 0
-  %filter2 = insertvalue { i8*, i32 } %exc_ptr2, i32 %filter, 1
-  resume { i8*, i32 } %filter2
+  %ex = landingpad { ptr, i32 } cleanup
+  %exc_ptr = extractvalue { ptr, i32 } %ex, 0
+  %filter = extractvalue { ptr, i32 } %ex, 1
+  %exc_ptr2 = insertvalue { ptr, i32 } undef, ptr %exc_ptr, 0
+  %filter2 = insertvalue { ptr, i32 } %exc_ptr2, i32 %filter, 1
+  resume { ptr, i32 } %filter2
 }
 
-declare i32 @__gxx_personality_v0(i32, i64, i8*, i8*)
+declare i32 @__gxx_personality_v0(i32, i64, ptr, ptr)
 
-define { i8, i32 } @test2({ i8*, i32 } %x) {
+define { i8, i32 } @test2({ ptr, i32 } %x) {
 ; CHECK-LABEL: @test2(
-; CHECK-NEXT:    [[EX:%.*]] = extractvalue { i8*, i32 } [[X:%.*]], 1
+; CHECK-NEXT:    [[EX:%.*]] = extractvalue { ptr, i32 } [[X:%.*]], 1
 ; CHECK-NEXT:    [[INS:%.*]] = insertvalue { i8, i32 } undef, i32 [[EX]], 1
 ; CHECK-NEXT:    ret { i8, i32 } [[INS]]
 ;
-  %ex = extractvalue { i8*, i32 } %x, 1
+  %ex = extractvalue { ptr, i32 } %x, 1
   %ins = insertvalue { i8, i32 } undef, i32 %ex, 1
   ret { i8, i32 } %ins
 }

diff  --git a/llvm/test/Transforms/InstSimplify/2011-10-27-BinOpCrash.ll b/llvm/test/Transforms/InstSimplify/2011-10-27-BinOpCrash.ll
index 8d391cd987f20..549e9942a2402 100644
--- a/llvm/test/Transforms/InstSimplify/2011-10-27-BinOpCrash.ll
+++ b/llvm/test/Transforms/InstSimplify/2011-10-27-BinOpCrash.ll
@@ -7,7 +7,7 @@
 @_ZN11xercesc_2_5L15gCombiningCharsE = external constant [163 x i16], align 2
 
 define i32 @_ZN11xercesc_2_515XMLRangeFactory11buildRangesEv(i32 %x) {
-  %a = add i32 %x, add (i32 add (i32 ashr (i32 add (i32 mul (i32 ptrtoint ([32 x i16]* @_ZN11xercesc_2_5L11gDigitCharsE to i32), i32 -1), i32 ptrtoint (i16* getelementptr inbounds ([32 x i16], [32 x i16]* @_ZN11xercesc_2_5L11gDigitCharsE, i32 0, i32 30) to i32)), i32 1), i32 ashr (i32 add (i32 mul (i32 ptrtoint ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE to i32), i32 -1), i32 ptrtoint (i16* getelementptr inbounds ([7 x i16], [7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE, i32 0, i32 4) to i32)), i32 1)), i32 8)
+  %a = add i32 %x, add (i32 add (i32 ashr (i32 add (i32 mul (i32 ptrtoint (ptr @_ZN11xercesc_2_5L11gDigitCharsE to i32), i32 -1), i32 ptrtoint (ptr getelementptr inbounds ([32 x i16], ptr @_ZN11xercesc_2_5L11gDigitCharsE, i32 0, i32 30) to i32)), i32 1), i32 ashr (i32 add (i32 mul (i32 ptrtoint (ptr @_ZN11xercesc_2_5L17gIdeographicCharsE to i32), i32 -1), i32 ptrtoint (ptr getelementptr inbounds ([7 x i16], ptr @_ZN11xercesc_2_5L17gIdeographicCharsE, i32 0, i32 4) to i32)), i32 1)), i32 8)
   %b = add i32 %a, %x
   ret i32 %b
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/2005-01-28-SetCCGEP.ll b/llvm/test/Transforms/InstSimplify/ConstProp/2005-01-28-SetCCGEP.ll
index 4f67c7d332acf..4b3a89cd92b5a 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/2005-01-28-SetCCGEP.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/2005-01-28-SetCCGEP.ll
@@ -1,10 +1,10 @@
 ; RUN: opt < %s -passes=instsimplify -S | \
 ; RUN:    not grep "ret i1 false"
 
-@b = external global [2 x {  }]         ; <[2 x {  }]*> [#uses=2]
+@b = external global [2 x {  }]         ; <ptr> [#uses=2]
 
 define i1 @f() {
-        %tmp.2 = icmp eq {  }* getelementptr ([2 x {  }], [2 x {  }]* @b, i32 0, i32 0), getelementptr ([2 x {  }], [2 x {  }]* @b, i32 0, i32 1)                ; <i1> [#uses=1]
+        %tmp.2 = icmp eq ptr @b, getelementptr ([2 x {  }], ptr @b, i32 0, i32 1)                ; <i1> [#uses=1]
         ret i1 %tmp.2
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/2008-07-07-VectorCompare.ll b/llvm/test/Transforms/InstSimplify/ConstProp/2008-07-07-VectorCompare.ll
index 5ec6bf6158db2..4939da63d30ca 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/2008-07-07-VectorCompare.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/2008-07-07-VectorCompare.ll
@@ -1,12 +1,12 @@
 ; RUN: opt < %s -passes=instsimplify -disable-output
 ; PR2529
-define <4 x i1> @test1(i32 %argc, i8** %argv) {
+define <4 x i1> @test1(i32 %argc, ptr %argv) {
 entry:  
         %foo = icmp slt <4 x i32> undef, <i32 14, i32 undef, i32 undef, i32 undef>
         ret <4 x i1> %foo
 }
 
-define <4 x i1> @test2(i32 %argc, i8** %argv) {
+define <4 x i1> @test2(i32 %argc, ptr %argv) {
 entry:  
         %foo = icmp slt <4 x i32> <i32 undef, i32 undef, i32 undef, i32
 undef>, <i32 undef, i32 undef, i32 undef, i32 undef>

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/2009-06-20-constexpr-zero-lhs.ll b/llvm/test/Transforms/InstSimplify/ConstProp/2009-06-20-constexpr-zero-lhs.ll
index 332260590ae16..a8c34b90ed05a 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/2009-06-20-constexpr-zero-lhs.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/2009-06-20-constexpr-zero-lhs.ll
@@ -1,11 +1,11 @@
 ; RUN: llvm-as < %s | llvm-dis | not grep ptrtoint
 ; PR4424
 @G = external global i32
-@test1 = constant i32 sdiv (i32 0, i32 ptrtoint (i32* @G to i32))
-@test2 = constant i32 udiv (i32 0, i32 ptrtoint (i32* @G to i32))
-@test3 = constant i32 srem (i32 0, i32 ptrtoint (i32* @G to i32))
-@test4 = constant i32 urem (i32 0, i32 ptrtoint (i32* @G to i32))
-@test5 = constant i32 lshr (i32 0, i32 ptrtoint (i32* @G to i32))
-@test6 = constant i32 ashr (i32 0, i32 ptrtoint (i32* @G to i32))
-@test7 = constant i32 shl (i32 0, i32 ptrtoint (i32* @G to i32))
+@test1 = constant i32 sdiv (i32 0, i32 ptrtoint (ptr @G to i32))
+@test2 = constant i32 udiv (i32 0, i32 ptrtoint (ptr @G to i32))
+@test3 = constant i32 srem (i32 0, i32 ptrtoint (ptr @G to i32))
+@test4 = constant i32 urem (i32 0, i32 ptrtoint (ptr @G to i32))
+@test5 = constant i32 lshr (i32 0, i32 ptrtoint (ptr @G to i32))
+@test6 = constant i32 ashr (i32 0, i32 ptrtoint (ptr @G to i32))
+@test7 = constant i32 shl (i32 0, i32 ptrtoint (ptr @G to i32))
 

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/2009-09-01-GEP-Crash.ll b/llvm/test/Transforms/InstSimplify/ConstProp/2009-09-01-GEP-Crash.ll
index 0caa1008694ac..21c4da0dafd06 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/2009-09-01-GEP-Crash.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/2009-09-01-GEP-Crash.ll
@@ -6,19 +6,18 @@ target triple = "x86_64-unknown-linux-gnu"
 %0 = type { %struct.anon }
 %1 = type { %0, %2, [24 x i8] }
 %2 = type <{ %3, %3 }>
-%3 = type { %struct.hrtimer_cpu_base*, i32, %struct.rb_root, %struct.rb_node*, %struct.pgprot, i64 ()*, [16 x i8] }
+%3 = type { ptr, i32, %struct.rb_root, ptr, %struct.pgprot, ptr, [16 x i8] }
 %struct.anon = type { }
-%struct.hrtimer_clock_base = type { %struct.hrtimer_cpu_base*, i32, %struct.rb_root, %struct.rb_node*, %struct.pgprot, i64 ()*, %struct.pgprot, %struct.pgprot }
+%struct.hrtimer_clock_base = type { ptr, i32, %struct.rb_root, ptr, %struct.pgprot, ptr, %struct.pgprot, %struct.pgprot }
 %struct.hrtimer_cpu_base = type { %0, [2 x %struct.hrtimer_clock_base], %struct.pgprot, i32, i64 }
 %struct.pgprot = type { i64 }
-%struct.rb_node = type { i64, %struct.rb_node*, %struct.rb_node* }
-%struct.rb_root = type { %struct.rb_node* }
+%struct.rb_node = type { i64, ptr, ptr }
+%struct.rb_root = type { ptr }
 
-@per_cpu__hrtimer_bases = external global %1, align 8 ; <%1*> [#uses=1]
+@per_cpu__hrtimer_bases = external global %1, align 8 ; <ptr> [#uses=1]
 
 define void @init_hrtimers_cpu(i32 %cpu) nounwind noredzone section ".cpuinit.text" {
 entry:
-  %tmp3 = getelementptr %struct.hrtimer_cpu_base, %struct.hrtimer_cpu_base* bitcast (%1* @per_cpu__hrtimer_bases to %struct.hrtimer_cpu_base*), i32 0, i32 0 ; <%0*> [#uses=1]
-  %tmp5 = bitcast %0* %tmp3 to i8*                ; <i8*> [#uses=0]
+  %tmp3 = getelementptr %struct.hrtimer_cpu_base, ptr @per_cpu__hrtimer_bases, i32 0, i32 0 ; <ptr> [#uses=1]
   unreachable
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cos.ll b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cos.ll
index dadae81b72402..5368da112ab46 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cos.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cos.ll
@@ -5,237 +5,237 @@ declare half @llvm.amdgcn.cos.f16(half) #0
 declare float @llvm.amdgcn.cos.f32(float) #0
 declare double @llvm.amdgcn.cos.f64(double) #0
 
-define void @test_f16(half* %p) {
+define void @test_f16(ptr %p) {
 ; CHECK-LABEL: @test_f16(
-; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P:%.*]], align 2
-; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH39A8, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH39A8, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xHBC00, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xHBC00, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, ptr [[P:%.*]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH39A8, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH39A8, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xHBC00, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xHBC00, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, ptr [[P]], align 2
 ; CHECK-NEXT:    [[P1000:%.*]] = call half @llvm.amdgcn.cos.f16(half 0xH63D0)
-; CHECK-NEXT:    store volatile half [[P1000]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half [[P1000]], ptr [[P]], align 2
 ; CHECK-NEXT:    [[N1000:%.*]] = call half @llvm.amdgcn.cos.f16(half 0xHE3D0)
-; CHECK-NEXT:    store volatile half [[N1000]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half [[N1000]], ptr [[P]], align 2
 ; CHECK-NEXT:    [[PINF:%.*]] = call half @llvm.amdgcn.cos.f16(half 0xH7C00)
-; CHECK-NEXT:    store volatile half [[PINF]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half [[PINF]], ptr [[P]], align 2
 ; CHECK-NEXT:    [[NINF:%.*]] = call half @llvm.amdgcn.cos.f16(half 0xHFC00)
-; CHECK-NEXT:    store volatile half [[NINF]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half [[NINF]], ptr [[P]], align 2
 ; CHECK-NEXT:    [[NAN:%.*]] = call half @llvm.amdgcn.cos.f16(half 0xH7E00)
-; CHECK-NEXT:    store volatile half [[NAN]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half [[NAN]], ptr [[P]], align 2
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call half @llvm.amdgcn.cos.f16(half +0.0)
-  store volatile half %p0, half* %p
+  store volatile half %p0, ptr %p
   %n0 = call half @llvm.amdgcn.cos.f16(half -0.0)
-  store volatile half %n0, half* %p
+  store volatile half %n0, ptr %p
   %p0125 = call half @llvm.amdgcn.cos.f16(half +0.125)
-  store volatile half %p0125, half* %p
+  store volatile half %p0125, ptr %p
   %n0125 = call half @llvm.amdgcn.cos.f16(half -0.125)
-  store volatile half %n0125, half* %p
+  store volatile half %n0125, ptr %p
   %p025 = call half @llvm.amdgcn.cos.f16(half +0.25)
-  store volatile half %p025, half* %p
+  store volatile half %p025, ptr %p
   %n025 = call half @llvm.amdgcn.cos.f16(half -0.25)
-  store volatile half %n025, half* %p
+  store volatile half %n025, ptr %p
   %p05 = call half @llvm.amdgcn.cos.f16(half +0.5)
-  store volatile half %p05, half* %p
+  store volatile half %p05, ptr %p
   %n05 = call half @llvm.amdgcn.cos.f16(half -0.5)
-  store volatile half %n05, half* %p
+  store volatile half %n05, ptr %p
   %p1 = call half @llvm.amdgcn.cos.f16(half +1.0)
-  store volatile half %p1, half* %p
+  store volatile half %p1, ptr %p
   %n1 = call half @llvm.amdgcn.cos.f16(half -1.0)
-  store volatile half %n1, half* %p
+  store volatile half %n1, ptr %p
   %p256 = call half @llvm.amdgcn.cos.f16(half +256.0)
-  store volatile half %p256, half* %p
+  store volatile half %p256, ptr %p
   %n256 = call half @llvm.amdgcn.cos.f16(half -256.0)
-  store volatile half %n256, half* %p
+  store volatile half %n256, ptr %p
   %p1000 = call half @llvm.amdgcn.cos.f16(half +1000.0)
-  store volatile half %p1000, half* %p
+  store volatile half %p1000, ptr %p
   %n1000 = call half @llvm.amdgcn.cos.f16(half -1000.0)
-  store volatile half %n1000, half* %p
+  store volatile half %n1000, ptr %p
   %pinf = call half @llvm.amdgcn.cos.f16(half 0xH7C00) ; +inf
-  store volatile half %pinf, half* %p
+  store volatile half %pinf, ptr %p
   %ninf = call half @llvm.amdgcn.cos.f16(half 0xHFC00) ; -inf
-  store volatile half %ninf, half* %p
+  store volatile half %ninf, ptr %p
   %nan = call half @llvm.amdgcn.cos.f16(half 0xH7E00) ; nan
-  store volatile half %nan, half* %p
+  store volatile half %nan, ptr %p
   ret void
 }
 
-define void @test_f32(float* %p) {
+define void @test_f32(ptr %p) {
 ; CHECK-LABEL: @test_f32(
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P:%.*]], align 4
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0x3FE6A09E60000000, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0x3FE6A09E60000000, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float -1.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float -1.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0x3FE6A09E60000000, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0x3FE6A09E60000000, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float -1.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float -1.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]], align 4
 ; CHECK-NEXT:    [[P1000:%.*]] = call float @llvm.amdgcn.cos.f32(float 1.000000e+03)
-; CHECK-NEXT:    store volatile float [[P1000]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float [[P1000]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[N1000:%.*]] = call float @llvm.amdgcn.cos.f32(float -1.000000e+03)
-; CHECK-NEXT:    store volatile float [[N1000]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float [[N1000]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[PINF:%.*]] = call float @llvm.amdgcn.cos.f32(float 0x7FF0000000000000)
-; CHECK-NEXT:    store volatile float [[PINF]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float [[PINF]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[NINF:%.*]] = call float @llvm.amdgcn.cos.f32(float 0xFFF0000000000000)
-; CHECK-NEXT:    store volatile float [[NINF]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float [[NINF]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[NAN:%.*]] = call float @llvm.amdgcn.cos.f32(float 0x7FF8000000000000)
-; CHECK-NEXT:    store volatile float [[NAN]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float [[NAN]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call float @llvm.amdgcn.cos.f32(float +0.0)
-  store volatile float %p0, float* %p
+  store volatile float %p0, ptr %p
   %n0 = call float @llvm.amdgcn.cos.f32(float -0.0)
-  store volatile float %n0, float* %p
+  store volatile float %n0, ptr %p
   %p0125 = call float @llvm.amdgcn.cos.f32(float +0.125)
-  store volatile float %p0125, float* %p
+  store volatile float %p0125, ptr %p
   %n0125 = call float @llvm.amdgcn.cos.f32(float -0.125)
-  store volatile float %n0125, float* %p
+  store volatile float %n0125, ptr %p
   %p025 = call float @llvm.amdgcn.cos.f32(float +0.25)
-  store volatile float %p025, float* %p
+  store volatile float %p025, ptr %p
   %n025 = call float @llvm.amdgcn.cos.f32(float -0.25)
-  store volatile float %n025, float* %p
+  store volatile float %n025, ptr %p
   %p05 = call float @llvm.amdgcn.cos.f32(float +0.5)
-  store volatile float %p05, float* %p
+  store volatile float %p05, ptr %p
   %n05 = call float @llvm.amdgcn.cos.f32(float -0.5)
-  store volatile float %n05, float* %p
+  store volatile float %n05, ptr %p
   %p1 = call float @llvm.amdgcn.cos.f32(float +1.0)
-  store volatile float %p1, float* %p
+  store volatile float %p1, ptr %p
   %n1 = call float @llvm.amdgcn.cos.f32(float -1.0)
-  store volatile float %n1, float* %p
+  store volatile float %n1, ptr %p
   %p256 = call float @llvm.amdgcn.cos.f32(float +256.0)
-  store volatile float %p256, float* %p
+  store volatile float %p256, ptr %p
   %n256 = call float @llvm.amdgcn.cos.f32(float -256.0)
-  store volatile float %n256, float* %p
+  store volatile float %n256, ptr %p
   %p1000 = call float @llvm.amdgcn.cos.f32(float +1000.0)
-  store volatile float %p1000, float* %p
+  store volatile float %p1000, ptr %p
   %n1000 = call float @llvm.amdgcn.cos.f32(float -1000.0)
-  store volatile float %n1000, float* %p
+  store volatile float %n1000, ptr %p
   %pinf = call float @llvm.amdgcn.cos.f32(float 0x7FF0000000000000) ; +inf
-  store volatile float %pinf, float* %p
+  store volatile float %pinf, ptr %p
   %ninf = call float @llvm.amdgcn.cos.f32(float 0xFFF0000000000000) ; -inf
-  store volatile float %ninf, float* %p
+  store volatile float %ninf, ptr %p
   %nan = call float @llvm.amdgcn.cos.f32(float 0x7FF8000000000000) ; nan
-  store volatile float %nan, float* %p
+  store volatile float %nan, ptr %p
   ret void
 }
 
-define void @test_f64(double* %p) {
+define void @test_f64(ptr %p) {
 ; CHECK-LABEL: @test_f64(
-; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P:%.*]], align 8
-; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 0x3FE6A09E667F3B{{.*}}, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 0x3FE6A09E667F3B{{.*}}, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double -1.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double -1.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, ptr [[P:%.*]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 0x3FE6A09E667F3B{{.*}}, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 0x3FE6A09E667F3B{{.*}}, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double -1.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double -1.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, ptr [[P]], align 8
 ; CHECK-NEXT:    [[P1000:%.*]] = call double @llvm.amdgcn.cos.f64(double 1.000000e+03)
-; CHECK-NEXT:    store volatile double [[P1000]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double [[P1000]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[N1000:%.*]] = call double @llvm.amdgcn.cos.f64(double -1.000000e+03)
-; CHECK-NEXT:    store volatile double [[N1000]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double [[N1000]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[PINF:%.*]] = call double @llvm.amdgcn.cos.f64(double 0x7FF0000000000000)
-; CHECK-NEXT:    store volatile double [[PINF]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double [[PINF]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[NINF:%.*]] = call double @llvm.amdgcn.cos.f64(double 0xFFF0000000000000)
-; CHECK-NEXT:    store volatile double [[NINF]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double [[NINF]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[NAN:%.*]] = call double @llvm.amdgcn.cos.f64(double 0x7FF8000000000000)
-; CHECK-NEXT:    store volatile double [[NAN]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double [[NAN]], ptr [[P]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call double @llvm.amdgcn.cos.f64(double +0.0)
-  store volatile double %p0, double* %p
+  store volatile double %p0, ptr %p
   %n0 = call double @llvm.amdgcn.cos.f64(double -0.0)
-  store volatile double %n0, double* %p
+  store volatile double %n0, ptr %p
   %p0125 = call double @llvm.amdgcn.cos.f64(double +0.125)
-  store volatile double %p0125, double* %p
+  store volatile double %p0125, ptr %p
   %n0125 = call double @llvm.amdgcn.cos.f64(double -0.125)
-  store volatile double %n0125, double* %p
+  store volatile double %n0125, ptr %p
   %p025 = call double @llvm.amdgcn.cos.f64(double +0.25)
-  store volatile double %p025, double* %p
+  store volatile double %p025, ptr %p
   %n025 = call double @llvm.amdgcn.cos.f64(double -0.25)
-  store volatile double %n025, double* %p
+  store volatile double %n025, ptr %p
   %p05 = call double @llvm.amdgcn.cos.f64(double +0.5)
-  store volatile double %p05, double* %p
+  store volatile double %p05, ptr %p
   %n05 = call double @llvm.amdgcn.cos.f64(double -0.5)
-  store volatile double %n05, double* %p
+  store volatile double %n05, ptr %p
   %p1 = call double @llvm.amdgcn.cos.f64(double +1.0)
-  store volatile double %p1, double* %p
+  store volatile double %p1, ptr %p
   %n1 = call double @llvm.amdgcn.cos.f64(double -1.0)
-  store volatile double %n1, double* %p
+  store volatile double %n1, ptr %p
   %p256 = call double @llvm.amdgcn.cos.f64(double +256.0)
-  store volatile double %p256, double* %p
+  store volatile double %p256, ptr %p
   %n256 = call double @llvm.amdgcn.cos.f64(double -256.0)
-  store volatile double %n256, double* %p
+  store volatile double %n256, ptr %p
   %p1000 = call double @llvm.amdgcn.cos.f64(double +1000.0)
-  store volatile double %p1000, double* %p
+  store volatile double %p1000, ptr %p
   %n1000 = call double @llvm.amdgcn.cos.f64(double -1000.0)
-  store volatile double %n1000, double* %p
+  store volatile double %n1000, ptr %p
   %pinf = call double @llvm.amdgcn.cos.f64(double 0x7FF0000000000000) ; +inf
-  store volatile double %pinf, double* %p
+  store volatile double %pinf, ptr %p
   %ninf = call double @llvm.amdgcn.cos.f64(double 0xFFF0000000000000) ; -inf
-  store volatile double %ninf, double* %p
+  store volatile double %ninf, ptr %p
   %nan = call double @llvm.amdgcn.cos.f64(double 0x7FF8000000000000) ; nan
-  store volatile double %nan, double* %p
+  store volatile double %nan, ptr %p
   ret void
 }
 
-define void @test_f16_strictfp (half* %p) #1 {
+define void @test_f16_strictfp (ptr %p) #1 {
 ; CHECK-LABEL: @test_f16_strictfp(
 ; CHECK-NEXT:    [[P0:%.*]] = call half @llvm.amdgcn.cos.f16(half 0xH0000) #1
-; CHECK-NEXT:    store volatile half [[P0]], half* [[P:%.*]], align 2
+; CHECK-NEXT:    store volatile half [[P0]], ptr [[P:%.*]], align 2
 ; CHECK-NEXT:    [[P025:%.*]] = call half @llvm.amdgcn.cos.f16(half 0xH3400) #1
-; CHECK-NEXT:    store volatile half [[P025]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half [[P025]], ptr [[P]], align 2
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call half @llvm.amdgcn.cos.f16(half +0.0) #1
-  store volatile half %p0, half* %p
+  store volatile half %p0, ptr %p
   %p025 = call half @llvm.amdgcn.cos.f16(half +0.25) #1
-  store volatile half %p025, half* %p
+  store volatile half %p025, ptr %p
   ret void
 }
 
-define void @test_f32_strictfp(float* %p) #1 {
+define void @test_f32_strictfp(ptr %p) #1 {
 ; CHECK-LABEL: @test_f32_strictfp(
 ; CHECK-NEXT:    [[P0:%.*]] = call float @llvm.amdgcn.cos.f32(float 0.000000e+00) #1
-; CHECK-NEXT:    store volatile float [[P0]], float* [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile float [[P0]], ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    [[P025:%.*]] = call float @llvm.amdgcn.cos.f32(float 2.500000e-01) #1
-; CHECK-NEXT:    store volatile float [[P025]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float [[P025]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call float @llvm.amdgcn.cos.f32(float +0.0) #1
-  store volatile float %p0, float* %p
+  store volatile float %p0, ptr %p
   %p025 = call float @llvm.amdgcn.cos.f32(float +0.25) #1
-  store volatile float %p025, float* %p
+  store volatile float %p025, ptr %p
   ret void
 }
 
-define void @test_f64_strictfp(double* %p) #1 {
+define void @test_f64_strictfp(ptr %p) #1 {
 ; CHECK-LABEL: @test_f64_strictfp(
 ; CHECK-NEXT:    [[P0:%.*]] = call double @llvm.amdgcn.cos.f64(double 0.000000e+00) #1
-; CHECK-NEXT:    store volatile double [[P0]], double* [[P:%.*]], align 8
+; CHECK-NEXT:    store volatile double [[P0]], ptr [[P:%.*]], align 8
 ; CHECK-NEXT:    [[P025:%.*]] = call double @llvm.amdgcn.cos.f64(double 2.500000e-01) #1
-; CHECK-NEXT:    store volatile double [[P025]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double [[P025]], ptr [[P]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call double @llvm.amdgcn.cos.f64(double +0.0) #1
-  store volatile double %p0, double* %p
+  store volatile double %p0, ptr %p
   %p025 = call double @llvm.amdgcn.cos.f64(double +0.25) #1
-  store volatile double %p025, double* %p
+  store volatile double %p025, ptr %p
   ret void
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubeid.ll b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubeid.ll
index 8a033661d1244..dece3dfd2f111 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubeid.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubeid.ll
@@ -3,153 +3,153 @@
 
 declare float @llvm.amdgcn.cubeid(float, float, float)
 
-define void @test(float* %p) {
+define void @test(ptr %p) {
 ; CHECK-LABEL: @test(
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P:%.*]]
-; CHECK-NEXT:    store volatile float 2.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 2.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 5.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 2.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 5.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 2.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 5.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 5.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 2.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 2.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 5.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 2.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 5.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 2.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 5.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 5.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P:%.*]]
+; CHECK-NEXT:    store volatile float 2.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 2.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 5.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 2.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 5.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 2.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 5.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 5.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 2.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 2.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 5.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 2.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 5.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 2.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 5.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 5.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]]
 ; CHECK-NEXT:    ret void
 ;
   %p3p4p5 = call float @llvm.amdgcn.cubeid(float +3.0, float +4.0, float +5.0)
-  store volatile float %p3p4p5, float* %p
+  store volatile float %p3p4p5, ptr %p
   %p3p5p4 = call float @llvm.amdgcn.cubeid(float +3.0, float +5.0, float +4.0)
-  store volatile float %p3p5p4, float* %p
+  store volatile float %p3p5p4, ptr %p
   %p4p3p5 = call float @llvm.amdgcn.cubeid(float +4.0, float +3.0, float +5.0)
-  store volatile float %p4p3p5, float* %p
+  store volatile float %p4p3p5, ptr %p
   %p4p5p3 = call float @llvm.amdgcn.cubeid(float +4.0, float +5.0, float +3.0)
-  store volatile float %p4p5p3, float* %p
+  store volatile float %p4p5p3, ptr %p
   %p5p3p4 = call float @llvm.amdgcn.cubeid(float +5.0, float +3.0, float +4.0)
-  store volatile float %p5p3p4, float* %p
+  store volatile float %p5p3p4, ptr %p
   %p5p4p3 = call float @llvm.amdgcn.cubeid(float +5.0, float +4.0, float +3.0)
-  store volatile float %p5p4p3, float* %p
+  store volatile float %p5p4p3, ptr %p
   %p3p4n5 = call float @llvm.amdgcn.cubeid(float +3.0, float +4.0, float -5.0)
-  store volatile float %p3p4n5, float* %p
+  store volatile float %p3p4n5, ptr %p
   %p3p5n4 = call float @llvm.amdgcn.cubeid(float +3.0, float +5.0, float -4.0)
-  store volatile float %p3p5n4, float* %p
+  store volatile float %p3p5n4, ptr %p
   %p4p3n5 = call float @llvm.amdgcn.cubeid(float +4.0, float +3.0, float -5.0)
-  store volatile float %p4p3n5, float* %p
+  store volatile float %p4p3n5, ptr %p
   %p4p5n3 = call float @llvm.amdgcn.cubeid(float +4.0, float +5.0, float -3.0)
-  store volatile float %p4p5n3, float* %p
+  store volatile float %p4p5n3, ptr %p
   %p5p3n4 = call float @llvm.amdgcn.cubeid(float +5.0, float +3.0, float -4.0)
-  store volatile float %p5p3n4, float* %p
+  store volatile float %p5p3n4, ptr %p
   %p5p4n3 = call float @llvm.amdgcn.cubeid(float +5.0, float +4.0, float -3.0)
-  store volatile float %p5p4n3, float* %p
+  store volatile float %p5p4n3, ptr %p
   %p3n4p5 = call float @llvm.amdgcn.cubeid(float +3.0, float -4.0, float +5.0)
-  store volatile float %p3n4p5, float* %p
+  store volatile float %p3n4p5, ptr %p
   %p3n5p4 = call float @llvm.amdgcn.cubeid(float +3.0, float -5.0, float +4.0)
-  store volatile float %p3n5p4, float* %p
+  store volatile float %p3n5p4, ptr %p
   %p4n3p5 = call float @llvm.amdgcn.cubeid(float +4.0, float -3.0, float +5.0)
-  store volatile float %p4n3p5, float* %p
+  store volatile float %p4n3p5, ptr %p
   %p4n5p3 = call float @llvm.amdgcn.cubeid(float +4.0, float -5.0, float +3.0)
-  store volatile float %p4n5p3, float* %p
+  store volatile float %p4n5p3, ptr %p
   %p5n3p4 = call float @llvm.amdgcn.cubeid(float +5.0, float -3.0, float +4.0)
-  store volatile float %p5n3p4, float* %p
+  store volatile float %p5n3p4, ptr %p
   %p5n4p3 = call float @llvm.amdgcn.cubeid(float +5.0, float -4.0, float +3.0)
-  store volatile float %p5n4p3, float* %p
+  store volatile float %p5n4p3, ptr %p
   %p3n4n5 = call float @llvm.amdgcn.cubeid(float +3.0, float -4.0, float -5.0)
-  store volatile float %p3n4n5, float* %p
+  store volatile float %p3n4n5, ptr %p
   %p3n5n4 = call float @llvm.amdgcn.cubeid(float +3.0, float -5.0, float -4.0)
-  store volatile float %p3n5n4, float* %p
+  store volatile float %p3n5n4, ptr %p
   %p4n3n5 = call float @llvm.amdgcn.cubeid(float +4.0, float -3.0, float -5.0)
-  store volatile float %p4n3n5, float* %p
+  store volatile float %p4n3n5, ptr %p
   %p4n5n3 = call float @llvm.amdgcn.cubeid(float +4.0, float -5.0, float -3.0)
-  store volatile float %p4n5n3, float* %p
+  store volatile float %p4n5n3, ptr %p
   %p5n3n4 = call float @llvm.amdgcn.cubeid(float +5.0, float -3.0, float -4.0)
-  store volatile float %p5n3n4, float* %p
+  store volatile float %p5n3n4, ptr %p
   %p5n4n3 = call float @llvm.amdgcn.cubeid(float +5.0, float -4.0, float -3.0)
-  store volatile float %p5n4n3, float* %p
+  store volatile float %p5n4n3, ptr %p
   %n3p4p5 = call float @llvm.amdgcn.cubeid(float -3.0, float +4.0, float +5.0)
-  store volatile float %n3p4p5, float* %p
+  store volatile float %n3p4p5, ptr %p
   %n3p5p4 = call float @llvm.amdgcn.cubeid(float -3.0, float +5.0, float +4.0)
-  store volatile float %n3p5p4, float* %p
+  store volatile float %n3p5p4, ptr %p
   %n4p3p5 = call float @llvm.amdgcn.cubeid(float -4.0, float +3.0, float +5.0)
-  store volatile float %n4p3p5, float* %p
+  store volatile float %n4p3p5, ptr %p
   %n4p5p3 = call float @llvm.amdgcn.cubeid(float -4.0, float +5.0, float +3.0)
-  store volatile float %n4p5p3, float* %p
+  store volatile float %n4p5p3, ptr %p
   %n5p3p4 = call float @llvm.amdgcn.cubeid(float -5.0, float +3.0, float +4.0)
-  store volatile float %n5p3p4, float* %p
+  store volatile float %n5p3p4, ptr %p
   %n5p4p3 = call float @llvm.amdgcn.cubeid(float -5.0, float +4.0, float +3.0)
-  store volatile float %n5p4p3, float* %p
+  store volatile float %n5p4p3, ptr %p
   %n3p4n5 = call float @llvm.amdgcn.cubeid(float -3.0, float +4.0, float -5.0)
-  store volatile float %n3p4n5, float* %p
+  store volatile float %n3p4n5, ptr %p
   %n3p5n4 = call float @llvm.amdgcn.cubeid(float -3.0, float +5.0, float -4.0)
-  store volatile float %n3p5n4, float* %p
+  store volatile float %n3p5n4, ptr %p
   %n4p3n5 = call float @llvm.amdgcn.cubeid(float -4.0, float +3.0, float -5.0)
-  store volatile float %n4p3n5, float* %p
+  store volatile float %n4p3n5, ptr %p
   %n4p5n3 = call float @llvm.amdgcn.cubeid(float -4.0, float +5.0, float -3.0)
-  store volatile float %n4p5n3, float* %p
+  store volatile float %n4p5n3, ptr %p
   %n5p3n4 = call float @llvm.amdgcn.cubeid(float -5.0, float +3.0, float -4.0)
-  store volatile float %n5p3n4, float* %p
+  store volatile float %n5p3n4, ptr %p
   %n5p4n3 = call float @llvm.amdgcn.cubeid(float -5.0, float +4.0, float -3.0)
-  store volatile float %n5p4n3, float* %p
+  store volatile float %n5p4n3, ptr %p
   %n3n4p5 = call float @llvm.amdgcn.cubeid(float -3.0, float -4.0, float +5.0)
-  store volatile float %n3n4p5, float* %p
+  store volatile float %n3n4p5, ptr %p
   %n3n5p4 = call float @llvm.amdgcn.cubeid(float -3.0, float -5.0, float +4.0)
-  store volatile float %n3n5p4, float* %p
+  store volatile float %n3n5p4, ptr %p
   %n4n3p5 = call float @llvm.amdgcn.cubeid(float -4.0, float -3.0, float +5.0)
-  store volatile float %n4n3p5, float* %p
+  store volatile float %n4n3p5, ptr %p
   %n4n5p3 = call float @llvm.amdgcn.cubeid(float -4.0, float -5.0, float +3.0)
-  store volatile float %n4n5p3, float* %p
+  store volatile float %n4n5p3, ptr %p
   %n5n3p4 = call float @llvm.amdgcn.cubeid(float -5.0, float -3.0, float +4.0)
-  store volatile float %n5n3p4, float* %p
+  store volatile float %n5n3p4, ptr %p
   %n5n4p3 = call float @llvm.amdgcn.cubeid(float -5.0, float -4.0, float +3.0)
-  store volatile float %n5n4p3, float* %p
+  store volatile float %n5n4p3, ptr %p
   %n3n4n5 = call float @llvm.amdgcn.cubeid(float -3.0, float -4.0, float -5.0)
-  store volatile float %n3n4n5, float* %p
+  store volatile float %n3n4n5, ptr %p
   %n3n5n4 = call float @llvm.amdgcn.cubeid(float -3.0, float -5.0, float -4.0)
-  store volatile float %n3n5n4, float* %p
+  store volatile float %n3n5n4, ptr %p
   %n4n3n5 = call float @llvm.amdgcn.cubeid(float -4.0, float -3.0, float -5.0)
-  store volatile float %n4n3n5, float* %p
+  store volatile float %n4n3n5, ptr %p
   %n4n5n3 = call float @llvm.amdgcn.cubeid(float -4.0, float -5.0, float -3.0)
-  store volatile float %n4n5n3, float* %p
+  store volatile float %n4n5n3, ptr %p
   %n5n3n4 = call float @llvm.amdgcn.cubeid(float -5.0, float -3.0, float -4.0)
-  store volatile float %n5n3n4, float* %p
+  store volatile float %n5n3n4, ptr %p
   %n5n4n3 = call float @llvm.amdgcn.cubeid(float -5.0, float -4.0, float -3.0)
-  store volatile float %n5n4n3, float* %p
+  store volatile float %n5n4n3, ptr %p
   ret void
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubema.ll b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubema.ll
index 97ab7b4229b6c..ab6deeaf6c919 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubema.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubema.ll
@@ -3,153 +3,153 @@
 
 declare float @llvm.amdgcn.cubema(float, float, float)
 
-define void @test(float* %p) {
+define void @test(ptr %p) {
 ; CHECK-LABEL: @test(
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P:%.*]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
-; CHECK-NEXT:    store volatile float -1.000000e+01, float* [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P:%.*]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
+; CHECK-NEXT:    store volatile float -1.000000e+01, ptr [[P]]
 ; CHECK-NEXT:    ret void
 ;
   %p3p4p5 = call float @llvm.amdgcn.cubema(float +3.0, float +4.0, float +5.0)
-  store volatile float %p3p4p5, float* %p
+  store volatile float %p3p4p5, ptr %p
   %p3p5p4 = call float @llvm.amdgcn.cubema(float +3.0, float +5.0, float +4.0)
-  store volatile float %p3p5p4, float* %p
+  store volatile float %p3p5p4, ptr %p
   %p4p3p5 = call float @llvm.amdgcn.cubema(float +4.0, float +3.0, float +5.0)
-  store volatile float %p4p3p5, float* %p
+  store volatile float %p4p3p5, ptr %p
   %p4p5p3 = call float @llvm.amdgcn.cubema(float +4.0, float +5.0, float +3.0)
-  store volatile float %p4p5p3, float* %p
+  store volatile float %p4p5p3, ptr %p
   %p5p3p4 = call float @llvm.amdgcn.cubema(float +5.0, float +3.0, float +4.0)
-  store volatile float %p5p3p4, float* %p
+  store volatile float %p5p3p4, ptr %p
   %p5p4p3 = call float @llvm.amdgcn.cubema(float +5.0, float +4.0, float +3.0)
-  store volatile float %p5p4p3, float* %p
+  store volatile float %p5p4p3, ptr %p
   %p3p4n5 = call float @llvm.amdgcn.cubema(float +3.0, float +4.0, float -5.0)
-  store volatile float %p3p4n5, float* %p
+  store volatile float %p3p4n5, ptr %p
   %p3p5n4 = call float @llvm.amdgcn.cubema(float +3.0, float +5.0, float -4.0)
-  store volatile float %p3p5n4, float* %p
+  store volatile float %p3p5n4, ptr %p
   %p4p3n5 = call float @llvm.amdgcn.cubema(float +4.0, float +3.0, float -5.0)
-  store volatile float %p4p3n5, float* %p
+  store volatile float %p4p3n5, ptr %p
   %p4p5n3 = call float @llvm.amdgcn.cubema(float +4.0, float +5.0, float -3.0)
-  store volatile float %p4p5n3, float* %p
+  store volatile float %p4p5n3, ptr %p
   %p5p3n4 = call float @llvm.amdgcn.cubema(float +5.0, float +3.0, float -4.0)
-  store volatile float %p5p3n4, float* %p
+  store volatile float %p5p3n4, ptr %p
   %p5p4n3 = call float @llvm.amdgcn.cubema(float +5.0, float +4.0, float -3.0)
-  store volatile float %p5p4n3, float* %p
+  store volatile float %p5p4n3, ptr %p
   %p3n4p5 = call float @llvm.amdgcn.cubema(float +3.0, float -4.0, float +5.0)
-  store volatile float %p3n4p5, float* %p
+  store volatile float %p3n4p5, ptr %p
   %p3n5p4 = call float @llvm.amdgcn.cubema(float +3.0, float -5.0, float +4.0)
-  store volatile float %p3n5p4, float* %p
+  store volatile float %p3n5p4, ptr %p
   %p4n3p5 = call float @llvm.amdgcn.cubema(float +4.0, float -3.0, float +5.0)
-  store volatile float %p4n3p5, float* %p
+  store volatile float %p4n3p5, ptr %p
   %p4n5p3 = call float @llvm.amdgcn.cubema(float +4.0, float -5.0, float +3.0)
-  store volatile float %p4n5p3, float* %p
+  store volatile float %p4n5p3, ptr %p
   %p5n3p4 = call float @llvm.amdgcn.cubema(float +5.0, float -3.0, float +4.0)
-  store volatile float %p5n3p4, float* %p
+  store volatile float %p5n3p4, ptr %p
   %p5n4p3 = call float @llvm.amdgcn.cubema(float +5.0, float -4.0, float +3.0)
-  store volatile float %p5n4p3, float* %p
+  store volatile float %p5n4p3, ptr %p
   %p3n4n5 = call float @llvm.amdgcn.cubema(float +3.0, float -4.0, float -5.0)
-  store volatile float %p3n4n5, float* %p
+  store volatile float %p3n4n5, ptr %p
   %p3n5n4 = call float @llvm.amdgcn.cubema(float +3.0, float -5.0, float -4.0)
-  store volatile float %p3n5n4, float* %p
+  store volatile float %p3n5n4, ptr %p
   %p4n3n5 = call float @llvm.amdgcn.cubema(float +4.0, float -3.0, float -5.0)
-  store volatile float %p4n3n5, float* %p
+  store volatile float %p4n3n5, ptr %p
   %p4n5n3 = call float @llvm.amdgcn.cubema(float +4.0, float -5.0, float -3.0)
-  store volatile float %p4n5n3, float* %p
+  store volatile float %p4n5n3, ptr %p
   %p5n3n4 = call float @llvm.amdgcn.cubema(float +5.0, float -3.0, float -4.0)
-  store volatile float %p5n3n4, float* %p
+  store volatile float %p5n3n4, ptr %p
   %p5n4n3 = call float @llvm.amdgcn.cubema(float +5.0, float -4.0, float -3.0)
-  store volatile float %p5n4n3, float* %p
+  store volatile float %p5n4n3, ptr %p
   %n3p4p5 = call float @llvm.amdgcn.cubema(float -3.0, float +4.0, float +5.0)
-  store volatile float %n3p4p5, float* %p
+  store volatile float %n3p4p5, ptr %p
   %n3p5p4 = call float @llvm.amdgcn.cubema(float -3.0, float +5.0, float +4.0)
-  store volatile float %n3p5p4, float* %p
+  store volatile float %n3p5p4, ptr %p
   %n4p3p5 = call float @llvm.amdgcn.cubema(float -4.0, float +3.0, float +5.0)
-  store volatile float %n4p3p5, float* %p
+  store volatile float %n4p3p5, ptr %p
   %n4p5p3 = call float @llvm.amdgcn.cubema(float -4.0, float +5.0, float +3.0)
-  store volatile float %n4p5p3, float* %p
+  store volatile float %n4p5p3, ptr %p
   %n5p3p4 = call float @llvm.amdgcn.cubema(float -5.0, float +3.0, float +4.0)
-  store volatile float %n5p3p4, float* %p
+  store volatile float %n5p3p4, ptr %p
   %n5p4p3 = call float @llvm.amdgcn.cubema(float -5.0, float +4.0, float +3.0)
-  store volatile float %n5p4p3, float* %p
+  store volatile float %n5p4p3, ptr %p
   %n3p4n5 = call float @llvm.amdgcn.cubema(float -3.0, float +4.0, float -5.0)
-  store volatile float %n3p4n5, float* %p
+  store volatile float %n3p4n5, ptr %p
   %n3p5n4 = call float @llvm.amdgcn.cubema(float -3.0, float +5.0, float -4.0)
-  store volatile float %n3p5n4, float* %p
+  store volatile float %n3p5n4, ptr %p
   %n4p3n5 = call float @llvm.amdgcn.cubema(float -4.0, float +3.0, float -5.0)
-  store volatile float %n4p3n5, float* %p
+  store volatile float %n4p3n5, ptr %p
   %n4p5n3 = call float @llvm.amdgcn.cubema(float -4.0, float +5.0, float -3.0)
-  store volatile float %n4p5n3, float* %p
+  store volatile float %n4p5n3, ptr %p
   %n5p3n4 = call float @llvm.amdgcn.cubema(float -5.0, float +3.0, float -4.0)
-  store volatile float %n5p3n4, float* %p
+  store volatile float %n5p3n4, ptr %p
   %n5p4n3 = call float @llvm.amdgcn.cubema(float -5.0, float +4.0, float -3.0)
-  store volatile float %n5p4n3, float* %p
+  store volatile float %n5p4n3, ptr %p
   %n3n4p5 = call float @llvm.amdgcn.cubema(float -3.0, float -4.0, float +5.0)
-  store volatile float %n3n4p5, float* %p
+  store volatile float %n3n4p5, ptr %p
   %n3n5p4 = call float @llvm.amdgcn.cubema(float -3.0, float -5.0, float +4.0)
-  store volatile float %n3n5p4, float* %p
+  store volatile float %n3n5p4, ptr %p
   %n4n3p5 = call float @llvm.amdgcn.cubema(float -4.0, float -3.0, float +5.0)
-  store volatile float %n4n3p5, float* %p
+  store volatile float %n4n3p5, ptr %p
   %n4n5p3 = call float @llvm.amdgcn.cubema(float -4.0, float -5.0, float +3.0)
-  store volatile float %n4n5p3, float* %p
+  store volatile float %n4n5p3, ptr %p
   %n5n3p4 = call float @llvm.amdgcn.cubema(float -5.0, float -3.0, float +4.0)
-  store volatile float %n5n3p4, float* %p
+  store volatile float %n5n3p4, ptr %p
   %n5n4p3 = call float @llvm.amdgcn.cubema(float -5.0, float -4.0, float +3.0)
-  store volatile float %n5n4p3, float* %p
+  store volatile float %n5n4p3, ptr %p
   %n3n4n5 = call float @llvm.amdgcn.cubema(float -3.0, float -4.0, float -5.0)
-  store volatile float %n3n4n5, float* %p
+  store volatile float %n3n4n5, ptr %p
   %n3n5n4 = call float @llvm.amdgcn.cubema(float -3.0, float -5.0, float -4.0)
-  store volatile float %n3n5n4, float* %p
+  store volatile float %n3n5n4, ptr %p
   %n4n3n5 = call float @llvm.amdgcn.cubema(float -4.0, float -3.0, float -5.0)
-  store volatile float %n4n3n5, float* %p
+  store volatile float %n4n3n5, ptr %p
   %n4n5n3 = call float @llvm.amdgcn.cubema(float -4.0, float -5.0, float -3.0)
-  store volatile float %n4n5n3, float* %p
+  store volatile float %n4n5n3, ptr %p
   %n5n3n4 = call float @llvm.amdgcn.cubema(float -5.0, float -3.0, float -4.0)
-  store volatile float %n5n3n4, float* %p
+  store volatile float %n5n3n4, ptr %p
   %n5n4n3 = call float @llvm.amdgcn.cubema(float -5.0, float -4.0, float -3.0)
-  store volatile float %n5n4n3, float* %p
+  store volatile float %n5n4n3, ptr %p
   ret void
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubesc.ll b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubesc.ll
index c6abdf8f51467..bb47bff163ab3 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubesc.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubesc.ll
@@ -3,153 +3,153 @@
 
 declare float @llvm.amdgcn.cubesc(float, float, float)
 
-define void @test(float* %p) {
+define void @test(ptr %p) {
 ; CHECK-LABEL: @test(
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P:%.*]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P:%.*]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
 ; CHECK-NEXT:    ret void
 ;
   %p3p4p5 = call float @llvm.amdgcn.cubesc(float +3.0, float +4.0, float +5.0)
-  store volatile float %p3p4p5, float* %p
+  store volatile float %p3p4p5, ptr %p
   %p3p5p4 = call float @llvm.amdgcn.cubesc(float +3.0, float +5.0, float +4.0)
-  store volatile float %p3p5p4, float* %p
+  store volatile float %p3p5p4, ptr %p
   %p4p3p5 = call float @llvm.amdgcn.cubesc(float +4.0, float +3.0, float +5.0)
-  store volatile float %p4p3p5, float* %p
+  store volatile float %p4p3p5, ptr %p
   %p4p5p3 = call float @llvm.amdgcn.cubesc(float +4.0, float +5.0, float +3.0)
-  store volatile float %p4p5p3, float* %p
+  store volatile float %p4p5p3, ptr %p
   %p5p3p4 = call float @llvm.amdgcn.cubesc(float +5.0, float +3.0, float +4.0)
-  store volatile float %p5p3p4, float* %p
+  store volatile float %p5p3p4, ptr %p
   %p5p4p3 = call float @llvm.amdgcn.cubesc(float +5.0, float +4.0, float +3.0)
-  store volatile float %p5p4p3, float* %p
+  store volatile float %p5p4p3, ptr %p
   %p3p4n5 = call float @llvm.amdgcn.cubesc(float +3.0, float +4.0, float -5.0)
-  store volatile float %p3p4n5, float* %p
+  store volatile float %p3p4n5, ptr %p
   %p3p5n4 = call float @llvm.amdgcn.cubesc(float +3.0, float +5.0, float -4.0)
-  store volatile float %p3p5n4, float* %p
+  store volatile float %p3p5n4, ptr %p
   %p4p3n5 = call float @llvm.amdgcn.cubesc(float +4.0, float +3.0, float -5.0)
-  store volatile float %p4p3n5, float* %p
+  store volatile float %p4p3n5, ptr %p
   %p4p5n3 = call float @llvm.amdgcn.cubesc(float +4.0, float +5.0, float -3.0)
-  store volatile float %p4p5n3, float* %p
+  store volatile float %p4p5n3, ptr %p
   %p5p3n4 = call float @llvm.amdgcn.cubesc(float +5.0, float +3.0, float -4.0)
-  store volatile float %p5p3n4, float* %p
+  store volatile float %p5p3n4, ptr %p
   %p5p4n3 = call float @llvm.amdgcn.cubesc(float +5.0, float +4.0, float -3.0)
-  store volatile float %p5p4n3, float* %p
+  store volatile float %p5p4n3, ptr %p
   %p3n4p5 = call float @llvm.amdgcn.cubesc(float +3.0, float -4.0, float +5.0)
-  store volatile float %p3n4p5, float* %p
+  store volatile float %p3n4p5, ptr %p
   %p3n5p4 = call float @llvm.amdgcn.cubesc(float +3.0, float -5.0, float +4.0)
-  store volatile float %p3n5p4, float* %p
+  store volatile float %p3n5p4, ptr %p
   %p4n3p5 = call float @llvm.amdgcn.cubesc(float +4.0, float -3.0, float +5.0)
-  store volatile float %p4n3p5, float* %p
+  store volatile float %p4n3p5, ptr %p
   %p4n5p3 = call float @llvm.amdgcn.cubesc(float +4.0, float -5.0, float +3.0)
-  store volatile float %p4n5p3, float* %p
+  store volatile float %p4n5p3, ptr %p
   %p5n3p4 = call float @llvm.amdgcn.cubesc(float +5.0, float -3.0, float +4.0)
-  store volatile float %p5n3p4, float* %p
+  store volatile float %p5n3p4, ptr %p
   %p5n4p3 = call float @llvm.amdgcn.cubesc(float +5.0, float -4.0, float +3.0)
-  store volatile float %p5n4p3, float* %p
+  store volatile float %p5n4p3, ptr %p
   %p3n4n5 = call float @llvm.amdgcn.cubesc(float +3.0, float -4.0, float -5.0)
-  store volatile float %p3n4n5, float* %p
+  store volatile float %p3n4n5, ptr %p
   %p3n5n4 = call float @llvm.amdgcn.cubesc(float +3.0, float -5.0, float -4.0)
-  store volatile float %p3n5n4, float* %p
+  store volatile float %p3n5n4, ptr %p
   %p4n3n5 = call float @llvm.amdgcn.cubesc(float +4.0, float -3.0, float -5.0)
-  store volatile float %p4n3n5, float* %p
+  store volatile float %p4n3n5, ptr %p
   %p4n5n3 = call float @llvm.amdgcn.cubesc(float +4.0, float -5.0, float -3.0)
-  store volatile float %p4n5n3, float* %p
+  store volatile float %p4n5n3, ptr %p
   %p5n3n4 = call float @llvm.amdgcn.cubesc(float +5.0, float -3.0, float -4.0)
-  store volatile float %p5n3n4, float* %p
+  store volatile float %p5n3n4, ptr %p
   %p5n4n3 = call float @llvm.amdgcn.cubesc(float +5.0, float -4.0, float -3.0)
-  store volatile float %p5n4n3, float* %p
+  store volatile float %p5n4n3, ptr %p
   %n3p4p5 = call float @llvm.amdgcn.cubesc(float -3.0, float +4.0, float +5.0)
-  store volatile float %n3p4p5, float* %p
+  store volatile float %n3p4p5, ptr %p
   %n3p5p4 = call float @llvm.amdgcn.cubesc(float -3.0, float +5.0, float +4.0)
-  store volatile float %n3p5p4, float* %p
+  store volatile float %n3p5p4, ptr %p
   %n4p3p5 = call float @llvm.amdgcn.cubesc(float -4.0, float +3.0, float +5.0)
-  store volatile float %n4p3p5, float* %p
+  store volatile float %n4p3p5, ptr %p
   %n4p5p3 = call float @llvm.amdgcn.cubesc(float -4.0, float +5.0, float +3.0)
-  store volatile float %n4p5p3, float* %p
+  store volatile float %n4p5p3, ptr %p
   %n5p3p4 = call float @llvm.amdgcn.cubesc(float -5.0, float +3.0, float +4.0)
-  store volatile float %n5p3p4, float* %p
+  store volatile float %n5p3p4, ptr %p
   %n5p4p3 = call float @llvm.amdgcn.cubesc(float -5.0, float +4.0, float +3.0)
-  store volatile float %n5p4p3, float* %p
+  store volatile float %n5p4p3, ptr %p
   %n3p4n5 = call float @llvm.amdgcn.cubesc(float -3.0, float +4.0, float -5.0)
-  store volatile float %n3p4n5, float* %p
+  store volatile float %n3p4n5, ptr %p
   %n3p5n4 = call float @llvm.amdgcn.cubesc(float -3.0, float +5.0, float -4.0)
-  store volatile float %n3p5n4, float* %p
+  store volatile float %n3p5n4, ptr %p
   %n4p3n5 = call float @llvm.amdgcn.cubesc(float -4.0, float +3.0, float -5.0)
-  store volatile float %n4p3n5, float* %p
+  store volatile float %n4p3n5, ptr %p
   %n4p5n3 = call float @llvm.amdgcn.cubesc(float -4.0, float +5.0, float -3.0)
-  store volatile float %n4p5n3, float* %p
+  store volatile float %n4p5n3, ptr %p
   %n5p3n4 = call float @llvm.amdgcn.cubesc(float -5.0, float +3.0, float -4.0)
-  store volatile float %n5p3n4, float* %p
+  store volatile float %n5p3n4, ptr %p
   %n5p4n3 = call float @llvm.amdgcn.cubesc(float -5.0, float +4.0, float -3.0)
-  store volatile float %n5p4n3, float* %p
+  store volatile float %n5p4n3, ptr %p
   %n3n4p5 = call float @llvm.amdgcn.cubesc(float -3.0, float -4.0, float +5.0)
-  store volatile float %n3n4p5, float* %p
+  store volatile float %n3n4p5, ptr %p
   %n3n5p4 = call float @llvm.amdgcn.cubesc(float -3.0, float -5.0, float +4.0)
-  store volatile float %n3n5p4, float* %p
+  store volatile float %n3n5p4, ptr %p
   %n4n3p5 = call float @llvm.amdgcn.cubesc(float -4.0, float -3.0, float +5.0)
-  store volatile float %n4n3p5, float* %p
+  store volatile float %n4n3p5, ptr %p
   %n4n5p3 = call float @llvm.amdgcn.cubesc(float -4.0, float -5.0, float +3.0)
-  store volatile float %n4n5p3, float* %p
+  store volatile float %n4n5p3, ptr %p
   %n5n3p4 = call float @llvm.amdgcn.cubesc(float -5.0, float -3.0, float +4.0)
-  store volatile float %n5n3p4, float* %p
+  store volatile float %n5n3p4, ptr %p
   %n5n4p3 = call float @llvm.amdgcn.cubesc(float -5.0, float -4.0, float +3.0)
-  store volatile float %n5n4p3, float* %p
+  store volatile float %n5n4p3, ptr %p
   %n3n4n5 = call float @llvm.amdgcn.cubesc(float -3.0, float -4.0, float -5.0)
-  store volatile float %n3n4n5, float* %p
+  store volatile float %n3n4n5, ptr %p
   %n3n5n4 = call float @llvm.amdgcn.cubesc(float -3.0, float -5.0, float -4.0)
-  store volatile float %n3n5n4, float* %p
+  store volatile float %n3n5n4, ptr %p
   %n4n3n5 = call float @llvm.amdgcn.cubesc(float -4.0, float -3.0, float -5.0)
-  store volatile float %n4n3n5, float* %p
+  store volatile float %n4n3n5, ptr %p
   %n4n5n3 = call float @llvm.amdgcn.cubesc(float -4.0, float -5.0, float -3.0)
-  store volatile float %n4n5n3, float* %p
+  store volatile float %n4n5n3, ptr %p
   %n5n3n4 = call float @llvm.amdgcn.cubesc(float -5.0, float -3.0, float -4.0)
-  store volatile float %n5n3n4, float* %p
+  store volatile float %n5n3n4, ptr %p
   %n5n4n3 = call float @llvm.amdgcn.cubesc(float -5.0, float -4.0, float -3.0)
-  store volatile float %n5n4n3, float* %p
+  store volatile float %n5n4n3, ptr %p
   ret void
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubetc.ll b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubetc.ll
index a14b2f37a529c..add53fec7b702 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubetc.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/cubetc.ll
@@ -3,153 +3,153 @@
 
 declare float @llvm.amdgcn.cubetc(float, float, float)
 
-define void @test(float* %p) {
+define void @test(ptr %p) {
 ; CHECK-LABEL: @test(
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P:%.*]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float -3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 3.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P:%.*]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float -3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 3.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]]
 ; CHECK-NEXT:    ret void
 ;
   %p3p4p5 = call float @llvm.amdgcn.cubetc(float +3.0, float +4.0, float +5.0)
-  store volatile float %p3p4p5, float* %p
+  store volatile float %p3p4p5, ptr %p
   %p3p5p4 = call float @llvm.amdgcn.cubetc(float +3.0, float +5.0, float +4.0)
-  store volatile float %p3p5p4, float* %p
+  store volatile float %p3p5p4, ptr %p
   %p4p3p5 = call float @llvm.amdgcn.cubetc(float +4.0, float +3.0, float +5.0)
-  store volatile float %p4p3p5, float* %p
+  store volatile float %p4p3p5, ptr %p
   %p4p5p3 = call float @llvm.amdgcn.cubetc(float +4.0, float +5.0, float +3.0)
-  store volatile float %p4p5p3, float* %p
+  store volatile float %p4p5p3, ptr %p
   %p5p3p4 = call float @llvm.amdgcn.cubetc(float +5.0, float +3.0, float +4.0)
-  store volatile float %p5p3p4, float* %p
+  store volatile float %p5p3p4, ptr %p
   %p5p4p3 = call float @llvm.amdgcn.cubetc(float +5.0, float +4.0, float +3.0)
-  store volatile float %p5p4p3, float* %p
+  store volatile float %p5p4p3, ptr %p
   %p3p4n5 = call float @llvm.amdgcn.cubetc(float +3.0, float +4.0, float -5.0)
-  store volatile float %p3p4n5, float* %p
+  store volatile float %p3p4n5, ptr %p
   %p3p5n4 = call float @llvm.amdgcn.cubetc(float +3.0, float +5.0, float -4.0)
-  store volatile float %p3p5n4, float* %p
+  store volatile float %p3p5n4, ptr %p
   %p4p3n5 = call float @llvm.amdgcn.cubetc(float +4.0, float +3.0, float -5.0)
-  store volatile float %p4p3n5, float* %p
+  store volatile float %p4p3n5, ptr %p
   %p4p5n3 = call float @llvm.amdgcn.cubetc(float +4.0, float +5.0, float -3.0)
-  store volatile float %p4p5n3, float* %p
+  store volatile float %p4p5n3, ptr %p
   %p5p3n4 = call float @llvm.amdgcn.cubetc(float +5.0, float +3.0, float -4.0)
-  store volatile float %p5p3n4, float* %p
+  store volatile float %p5p3n4, ptr %p
   %p5p4n3 = call float @llvm.amdgcn.cubetc(float +5.0, float +4.0, float -3.0)
-  store volatile float %p5p4n3, float* %p
+  store volatile float %p5p4n3, ptr %p
   %p3n4p5 = call float @llvm.amdgcn.cubetc(float +3.0, float -4.0, float +5.0)
-  store volatile float %p3n4p5, float* %p
+  store volatile float %p3n4p5, ptr %p
   %p3n5p4 = call float @llvm.amdgcn.cubetc(float +3.0, float -5.0, float +4.0)
-  store volatile float %p3n5p4, float* %p
+  store volatile float %p3n5p4, ptr %p
   %p4n3p5 = call float @llvm.amdgcn.cubetc(float +4.0, float -3.0, float +5.0)
-  store volatile float %p4n3p5, float* %p
+  store volatile float %p4n3p5, ptr %p
   %p4n5p3 = call float @llvm.amdgcn.cubetc(float +4.0, float -5.0, float +3.0)
-  store volatile float %p4n5p3, float* %p
+  store volatile float %p4n5p3, ptr %p
   %p5n3p4 = call float @llvm.amdgcn.cubetc(float +5.0, float -3.0, float +4.0)
-  store volatile float %p5n3p4, float* %p
+  store volatile float %p5n3p4, ptr %p
   %p5n4p3 = call float @llvm.amdgcn.cubetc(float +5.0, float -4.0, float +3.0)
-  store volatile float %p5n4p3, float* %p
+  store volatile float %p5n4p3, ptr %p
   %p3n4n5 = call float @llvm.amdgcn.cubetc(float +3.0, float -4.0, float -5.0)
-  store volatile float %p3n4n5, float* %p
+  store volatile float %p3n4n5, ptr %p
   %p3n5n4 = call float @llvm.amdgcn.cubetc(float +3.0, float -5.0, float -4.0)
-  store volatile float %p3n5n4, float* %p
+  store volatile float %p3n5n4, ptr %p
   %p4n3n5 = call float @llvm.amdgcn.cubetc(float +4.0, float -3.0, float -5.0)
-  store volatile float %p4n3n5, float* %p
+  store volatile float %p4n3n5, ptr %p
   %p4n5n3 = call float @llvm.amdgcn.cubetc(float +4.0, float -5.0, float -3.0)
-  store volatile float %p4n5n3, float* %p
+  store volatile float %p4n5n3, ptr %p
   %p5n3n4 = call float @llvm.amdgcn.cubetc(float +5.0, float -3.0, float -4.0)
-  store volatile float %p5n3n4, float* %p
+  store volatile float %p5n3n4, ptr %p
   %p5n4n3 = call float @llvm.amdgcn.cubetc(float +5.0, float -4.0, float -3.0)
-  store volatile float %p5n4n3, float* %p
+  store volatile float %p5n4n3, ptr %p
   %n3p4p5 = call float @llvm.amdgcn.cubetc(float -3.0, float +4.0, float +5.0)
-  store volatile float %n3p4p5, float* %p
+  store volatile float %n3p4p5, ptr %p
   %n3p5p4 = call float @llvm.amdgcn.cubetc(float -3.0, float +5.0, float +4.0)
-  store volatile float %n3p5p4, float* %p
+  store volatile float %n3p5p4, ptr %p
   %n4p3p5 = call float @llvm.amdgcn.cubetc(float -4.0, float +3.0, float +5.0)
-  store volatile float %n4p3p5, float* %p
+  store volatile float %n4p3p5, ptr %p
   %n4p5p3 = call float @llvm.amdgcn.cubetc(float -4.0, float +5.0, float +3.0)
-  store volatile float %n4p5p3, float* %p
+  store volatile float %n4p5p3, ptr %p
   %n5p3p4 = call float @llvm.amdgcn.cubetc(float -5.0, float +3.0, float +4.0)
-  store volatile float %n5p3p4, float* %p
+  store volatile float %n5p3p4, ptr %p
   %n5p4p3 = call float @llvm.amdgcn.cubetc(float -5.0, float +4.0, float +3.0)
-  store volatile float %n5p4p3, float* %p
+  store volatile float %n5p4p3, ptr %p
   %n3p4n5 = call float @llvm.amdgcn.cubetc(float -3.0, float +4.0, float -5.0)
-  store volatile float %n3p4n5, float* %p
+  store volatile float %n3p4n5, ptr %p
   %n3p5n4 = call float @llvm.amdgcn.cubetc(float -3.0, float +5.0, float -4.0)
-  store volatile float %n3p5n4, float* %p
+  store volatile float %n3p5n4, ptr %p
   %n4p3n5 = call float @llvm.amdgcn.cubetc(float -4.0, float +3.0, float -5.0)
-  store volatile float %n4p3n5, float* %p
+  store volatile float %n4p3n5, ptr %p
   %n4p5n3 = call float @llvm.amdgcn.cubetc(float -4.0, float +5.0, float -3.0)
-  store volatile float %n4p5n3, float* %p
+  store volatile float %n4p5n3, ptr %p
   %n5p3n4 = call float @llvm.amdgcn.cubetc(float -5.0, float +3.0, float -4.0)
-  store volatile float %n5p3n4, float* %p
+  store volatile float %n5p3n4, ptr %p
   %n5p4n3 = call float @llvm.amdgcn.cubetc(float -5.0, float +4.0, float -3.0)
-  store volatile float %n5p4n3, float* %p
+  store volatile float %n5p4n3, ptr %p
   %n3n4p5 = call float @llvm.amdgcn.cubetc(float -3.0, float -4.0, float +5.0)
-  store volatile float %n3n4p5, float* %p
+  store volatile float %n3n4p5, ptr %p
   %n3n5p4 = call float @llvm.amdgcn.cubetc(float -3.0, float -5.0, float +4.0)
-  store volatile float %n3n5p4, float* %p
+  store volatile float %n3n5p4, ptr %p
   %n4n3p5 = call float @llvm.amdgcn.cubetc(float -4.0, float -3.0, float +5.0)
-  store volatile float %n4n3p5, float* %p
+  store volatile float %n4n3p5, ptr %p
   %n4n5p3 = call float @llvm.amdgcn.cubetc(float -4.0, float -5.0, float +3.0)
-  store volatile float %n4n5p3, float* %p
+  store volatile float %n4n5p3, ptr %p
   %n5n3p4 = call float @llvm.amdgcn.cubetc(float -5.0, float -3.0, float +4.0)
-  store volatile float %n5n3p4, float* %p
+  store volatile float %n5n3p4, ptr %p
   %n5n4p3 = call float @llvm.amdgcn.cubetc(float -5.0, float -4.0, float +3.0)
-  store volatile float %n5n4p3, float* %p
+  store volatile float %n5n4p3, ptr %p
   %n3n4n5 = call float @llvm.amdgcn.cubetc(float -3.0, float -4.0, float -5.0)
-  store volatile float %n3n4n5, float* %p
+  store volatile float %n3n4n5, ptr %p
   %n3n5n4 = call float @llvm.amdgcn.cubetc(float -3.0, float -5.0, float -4.0)
-  store volatile float %n3n5n4, float* %p
+  store volatile float %n3n5n4, ptr %p
   %n4n3n5 = call float @llvm.amdgcn.cubetc(float -4.0, float -3.0, float -5.0)
-  store volatile float %n4n3n5, float* %p
+  store volatile float %n4n3n5, ptr %p
   %n4n5n3 = call float @llvm.amdgcn.cubetc(float -4.0, float -5.0, float -3.0)
-  store volatile float %n4n5n3, float* %p
+  store volatile float %n4n5n3, ptr %p
   %n5n3n4 = call float @llvm.amdgcn.cubetc(float -5.0, float -3.0, float -4.0)
-  store volatile float %n5n3n4, float* %p
+  store volatile float %n5n3n4, ptr %p
   %n5n4n3 = call float @llvm.amdgcn.cubetc(float -5.0, float -4.0, float -3.0)
-  store volatile float %n5n4n3, float* %p
+  store volatile float %n5n4n3, ptr %p
   ret void
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fma_legacy.ll b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fma_legacy.ll
index a0d68e4f3bd46..1597df712701b 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fma_legacy.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fma_legacy.ll
@@ -3,42 +3,42 @@
 
 declare float @llvm.amdgcn.fma.legacy(float, float, float)
 
-define void @test(float* %p) {
+define void @test(ptr %p) {
 ; CHECK-LABEL: @test(
-; CHECK-NEXT:    store volatile float 1.000000e+01, float* [[P:%.*]], align 4
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 4.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+01, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 4.000000e+00, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %a = call float @llvm.amdgcn.fma.legacy(float +2.0, float +3.0, float +4.0)
-  store volatile float %a, float* %p
+  store volatile float %a, ptr %p
   %b = call float @llvm.amdgcn.fma.legacy(float +2.0, float +0.0, float +4.0)
-  store volatile float %b, float* %p
+  store volatile float %b, ptr %p
   %c = call float @llvm.amdgcn.fma.legacy(float +2.0, float -0.0, float +4.0)
-  store volatile float %c, float* %p
+  store volatile float %c, ptr %p
   %d = call float @llvm.amdgcn.fma.legacy(float +0.0, float +0.0, float -0.0)
-  store volatile float %d, float* %p
+  store volatile float %d, ptr %p
   %e = call float @llvm.amdgcn.fma.legacy(float +0.0, float -0.0, float -0.0)
-  store volatile float %e, float* %p
+  store volatile float %e, ptr %p
   %f = call float @llvm.amdgcn.fma.legacy(float -0.0, float +0.0, float -0.0)
-  store volatile float %f, float* %p
+  store volatile float %f, ptr %p
   %g = call float @llvm.amdgcn.fma.legacy(float -0.0, float -0.0, float -0.0)
-  store volatile float %g, float* %p
+  store volatile float %g, ptr %p
   %h = call float @llvm.amdgcn.fma.legacy(float +0.0, float 0x7ff0000000000000, float +4.0) ; +inf
-  store volatile float %h, float* %p
+  store volatile float %h, ptr %p
   %i = call float @llvm.amdgcn.fma.legacy(float 0xfff0000000000000, float +0.0, float +4.0) ; -inf
-  store volatile float %i, float* %p
+  store volatile float %i, ptr %p
   %j = call float @llvm.amdgcn.fma.legacy(float 0x7ff0001000000000, float -0.0, float +4.0) ; +nan
-  store volatile float %j, float* %p
+  store volatile float %j, ptr %p
   %k = call float @llvm.amdgcn.fma.legacy(float -0.0, float 0xfff0000100000000, float +4.0) ; -nan
-  store volatile float %k, float* %p
+  store volatile float %k, ptr %p
   ret void
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fmul_legacy.ll b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fmul_legacy.ll
index 0ad4fc9a5c063..3a53a8199ca09 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fmul_legacy.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fmul_legacy.ll
@@ -3,42 +3,42 @@
 
 declare float @llvm.amdgcn.fmul.legacy(float, float)
 
-define void @test(float* %p) {
+define void @test(ptr %p) {
 ; CHECK-LABEL: @test(
-; CHECK-NEXT:    store volatile float 6.000000e+00, float* [[P:%.*]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
+; CHECK-NEXT:    store volatile float 6.000000e+00, ptr [[P:%.*]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
 ; CHECK-NEXT:    ret void
 ;
   %a = call float @llvm.amdgcn.fmul.legacy(float +2.0, float +3.0)
-  store volatile float %a, float* %p
+  store volatile float %a, ptr %p
   %b = call float @llvm.amdgcn.fmul.legacy(float +2.0, float +0.0)
-  store volatile float %b, float* %p
+  store volatile float %b, ptr %p
   %c = call float @llvm.amdgcn.fmul.legacy(float +2.0, float -0.0)
-  store volatile float %c, float* %p
+  store volatile float %c, ptr %p
   %d = call float @llvm.amdgcn.fmul.legacy(float +0.0, float +0.0)
-  store volatile float %d, float* %p
+  store volatile float %d, ptr %p
   %e = call float @llvm.amdgcn.fmul.legacy(float +0.0, float -0.0)
-  store volatile float %e, float* %p
+  store volatile float %e, ptr %p
   %f = call float @llvm.amdgcn.fmul.legacy(float -0.0, float +0.0)
-  store volatile float %f, float* %p
+  store volatile float %f, ptr %p
   %g = call float @llvm.amdgcn.fmul.legacy(float -0.0, float -0.0)
-  store volatile float %g, float* %p
+  store volatile float %g, ptr %p
   %h = call float @llvm.amdgcn.fmul.legacy(float +0.0, float 0x7ff0000000000000) ; +inf
-  store volatile float %h, float* %p
+  store volatile float %h, ptr %p
   %i = call float @llvm.amdgcn.fmul.legacy(float 0xfff0000000000000, float +0.0) ; -inf
-  store volatile float %i, float* %p
+  store volatile float %i, ptr %p
   %j = call float @llvm.amdgcn.fmul.legacy(float 0x7ff0001000000000, float -0.0) ; +nan
-  store volatile float %j, float* %p
+  store volatile float %j, ptr %p
   %k = call float @llvm.amdgcn.fmul.legacy(float -0.0, float 0xfff0000100000000) ; -nan
-  store volatile float %k, float* %p
+  store volatile float %k, ptr %p
   ret void
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fract.ll b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fract.ll
index 69d351bc22fee..73fc897748f64 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fract.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/fract.ll
@@ -5,122 +5,122 @@ declare half @llvm.amdgcn.fract.f16(half)
 declare float @llvm.amdgcn.fract.f32(float)
 declare double @llvm.amdgcn.fract.f64(double)
 
-define void @test_f16(half* %p) {
+define void @test_f16(ptr %p) {
 ; CHECK-LABEL: @test_f16(
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P:%.*]]
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P]]
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P]]
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P]]
-; CHECK-NEXT:    store volatile half 0xH3400, half* [[P]]
-; CHECK-NEXT:    store volatile half 0xH3B00, half* [[P]]
-; CHECK-NEXT:    store volatile half 0xH0400, half* [[P]]
-; CHECK-NEXT:    store volatile half 0xH3BFF, half* [[P]]
-; CHECK-NEXT:    store volatile half 0xH7E00, half* [[P]]
-; CHECK-NEXT:    store volatile half 0xH7E00, half* [[P]]
-; CHECK-NEXT:    store volatile half 0xH7E00, half* [[P]]
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P:%.*]]
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]]
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]]
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]]
+; CHECK-NEXT:    store volatile half 0xH3400, ptr [[P]]
+; CHECK-NEXT:    store volatile half 0xH3B00, ptr [[P]]
+; CHECK-NEXT:    store volatile half 0xH0400, ptr [[P]]
+; CHECK-NEXT:    store volatile half 0xH3BFF, ptr [[P]]
+; CHECK-NEXT:    store volatile half 0xH7E00, ptr [[P]]
+; CHECK-NEXT:    store volatile half 0xH7E00, ptr [[P]]
+; CHECK-NEXT:    store volatile half 0xH7E00, ptr [[P]]
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call half @llvm.amdgcn.fract.f16(half +0.0)
-  store volatile half %p0, half* %p
+  store volatile half %p0, ptr %p
   %n0 = call half @llvm.amdgcn.fract.f16(half -0.0)
-  store volatile half %n0, half* %p
+  store volatile half %n0, ptr %p
   %p1 = call half @llvm.amdgcn.fract.f16(half +1.0)
-  store volatile half %p1, half* %p
+  store volatile half %p1, ptr %p
   %n1 = call half @llvm.amdgcn.fract.f16(half -1.0)
-  store volatile half %n1, half* %p
+  store volatile half %n1, ptr %p
   %p225 = call half @llvm.amdgcn.fract.f16(half +2.25)
-  store volatile half %p225, half* %p
+  store volatile half %p225, ptr %p
   %n6125 = call half @llvm.amdgcn.fract.f16(half -6.125)
-  store volatile half %n6125, half* %p
+  store volatile half %n6125, ptr %p
   %ptiny = call half @llvm.amdgcn.fract.f16(half 0xH0400) ; +min normal
-  store volatile half %ptiny, half* %p
+  store volatile half %ptiny, ptr %p
   %ntiny = call half @llvm.amdgcn.fract.f16(half 0xH8400) ; -min normal
-  store volatile half %ntiny, half* %p
+  store volatile half %ntiny, ptr %p
   %pinf = call half @llvm.amdgcn.fract.f16(half 0xH7C00) ; +inf
-  store volatile half %pinf, half* %p
+  store volatile half %pinf, ptr %p
   %ninf = call half @llvm.amdgcn.fract.f16(half 0xHFC00) ; -inf
-  store volatile half %ninf, half* %p
+  store volatile half %ninf, ptr %p
   %nan = call half @llvm.amdgcn.fract.f16(half 0xH7E00) ; nan
-  store volatile half %nan, half* %p
+  store volatile half %nan, ptr %p
   ret void
 }
 
-define void @test_f32(float* %p) {
+define void @test_f32(ptr %p) {
 ; CHECK-LABEL: @test_f32(
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P:%.*]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]]
-; CHECK-NEXT:    store volatile float 2.500000e-01, float* [[P]]
-; CHECK-NEXT:    store volatile float 8.750000e-01, float* [[P]]
-; CHECK-NEXT:    store volatile float 0x3810000000000000, float* [[P]]
-; CHECK-NEXT:    store volatile float 0x3FEFFFFFE0000000, float* [[P]]
-; CHECK-NEXT:    store volatile float 0x7FF8000000000000, float* [[P]]
-; CHECK-NEXT:    store volatile float 0x7FF8000000000000, float* [[P]]
-; CHECK-NEXT:    store volatile float 0x7FF8000000000000, float* [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P:%.*]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile float 2.500000e-01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 8.750000e-01, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0x3810000000000000, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0x3FEFFFFFE0000000, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0x7FF8000000000000, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0x7FF8000000000000, ptr [[P]]
+; CHECK-NEXT:    store volatile float 0x7FF8000000000000, ptr [[P]]
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call float @llvm.amdgcn.fract.f32(float +0.0)
-  store volatile float %p0, float* %p
+  store volatile float %p0, ptr %p
   %n0 = call float @llvm.amdgcn.fract.f32(float -0.0)
-  store volatile float %n0, float* %p
+  store volatile float %n0, ptr %p
   %p1 = call float @llvm.amdgcn.fract.f32(float +1.0)
-  store volatile float %p1, float* %p
+  store volatile float %p1, ptr %p
   %n1 = call float @llvm.amdgcn.fract.f32(float -1.0)
-  store volatile float %n1, float* %p
+  store volatile float %n1, ptr %p
   %p225 = call float @llvm.amdgcn.fract.f32(float +2.25)
-  store volatile float %p225, float* %p
+  store volatile float %p225, ptr %p
   %n6125 = call float @llvm.amdgcn.fract.f32(float -6.125)
-  store volatile float %n6125, float* %p
+  store volatile float %n6125, ptr %p
   %ptiny = call float @llvm.amdgcn.fract.f32(float 0x3810000000000000) ; +min normal
-  store volatile float %ptiny, float* %p
+  store volatile float %ptiny, ptr %p
   %ntiny = call float @llvm.amdgcn.fract.f32(float 0xB810000000000000) ; -min normal
-  store volatile float %ntiny, float* %p
+  store volatile float %ntiny, ptr %p
   %pinf = call float @llvm.amdgcn.fract.f32(float 0x7FF0000000000000) ; +inf
-  store volatile float %pinf, float* %p
+  store volatile float %pinf, ptr %p
   %ninf = call float @llvm.amdgcn.fract.f32(float 0xFFF0000000000000) ; -inf
-  store volatile float %ninf, float* %p
+  store volatile float %ninf, ptr %p
   %nan = call float @llvm.amdgcn.fract.f32(float 0x7FF8000000000000) ; nan
-  store volatile float %nan, float* %p
+  store volatile float %nan, ptr %p
   ret void
 }
 
-define void @test_f64(double* %p) {
+define void @test_f64(ptr %p) {
 ; CHECK-LABEL: @test_f64(
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P:%.*]]
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P]]
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P]]
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P]]
-; CHECK-NEXT:    store volatile double 2.500000e-01, double* [[P]]
-; CHECK-NEXT:    store volatile double 8.750000e-01, double* [[P]]
-; CHECK-NEXT:    store volatile double 2.000000e-308, double* [[P]]
-; CHECK-NEXT:    store volatile double 0x3FEFFFFFFFFFFFFF, double* [[P]]
-; CHECK-NEXT:    store volatile double 0x7FF8000000000000, double* [[P]]
-; CHECK-NEXT:    store volatile double 0x7FF8000000000000, double* [[P]]
-; CHECK-NEXT:    store volatile double 0x7FF8000000000000, double* [[P]]
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P:%.*]]
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]]
+; CHECK-NEXT:    store volatile double 2.500000e-01, ptr [[P]]
+; CHECK-NEXT:    store volatile double 8.750000e-01, ptr [[P]]
+; CHECK-NEXT:    store volatile double 2.000000e-308, ptr [[P]]
+; CHECK-NEXT:    store volatile double 0x3FEFFFFFFFFFFFFF, ptr [[P]]
+; CHECK-NEXT:    store volatile double 0x7FF8000000000000, ptr [[P]]
+; CHECK-NEXT:    store volatile double 0x7FF8000000000000, ptr [[P]]
+; CHECK-NEXT:    store volatile double 0x7FF8000000000000, ptr [[P]]
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call double @llvm.amdgcn.fract.f64(double +0.0)
-  store volatile double %p0, double* %p
+  store volatile double %p0, ptr %p
   %n0 = call double @llvm.amdgcn.fract.f64(double -0.0)
-  store volatile double %n0, double* %p
+  store volatile double %n0, ptr %p
   %p1 = call double @llvm.amdgcn.fract.f64(double +1.0)
-  store volatile double %p1, double* %p
+  store volatile double %p1, ptr %p
   %n1 = call double @llvm.amdgcn.fract.f64(double -1.0)
-  store volatile double %n1, double* %p
+  store volatile double %n1, ptr %p
   %p225 = call double @llvm.amdgcn.fract.f64(double +2.25)
-  store volatile double %p225, double* %p
+  store volatile double %p225, ptr %p
   %n6125 = call double @llvm.amdgcn.fract.f64(double -6.125)
-  store volatile double %n6125, double* %p
+  store volatile double %n6125, ptr %p
   %ptiny = call double @llvm.amdgcn.fract.f64(double +2.0e-308) ; +min normal
-  store volatile double %ptiny, double* %p
+  store volatile double %ptiny, ptr %p
   %ntiny = call double @llvm.amdgcn.fract.f64(double -2.0e-308) ; -min normal
-  store volatile double %ntiny, double* %p
+  store volatile double %ntiny, ptr %p
   %pinf = call double @llvm.amdgcn.fract.f64(double 0x7FF0000000000000) ; +inf
-  store volatile double %pinf, double* %p
+  store volatile double %pinf, ptr %p
   %ninf = call double @llvm.amdgcn.fract.f64(double 0xFFF0000000000000) ; -inf
-  store volatile double %ninf, double* %p
+  store volatile double %ninf, ptr %p
   %nan = call double @llvm.amdgcn.fract.f64(double 0x7FF8000000000000) ; nan
-  store volatile double %nan, double* %p
+  store volatile double %nan, ptr %p
   ret void
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/perm.ll b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/perm.ll
index 9fc38aa823015..7be29367e7120 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/perm.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/perm.ll
@@ -4,99 +4,99 @@
 declare i32 @llvm.amdgcn.perm(i32, i32, i32)
 
 ; src1 = 0x19203a4b (421542475), src2 = 0x5c6d7e8f (1550679695)
-define void @test(i32* %p) {
+define void @test(ptr %p) {
 ; CHECK-LABEL: @test(
-; CHECK-NEXT:    store volatile i32 undef, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 -1887539876, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 2121096267, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1262100505, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1550679695, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 421542475, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 545143439, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 16711935, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 16711935, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 436174336, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 16711680, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 undef, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 421542475, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1550679695, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 undef, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 143, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 255, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1550679552, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 75, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 255, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 65535, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 421542400, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -16776961, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 255, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -16777216, i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 undef, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 -1887539876, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 2121096267, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1262100505, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1550679695, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 421542475, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 545143439, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 16711935, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 16711935, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 436174336, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 16711680, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 undef, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 421542475, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1550679695, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 undef, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 143, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 255, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1550679552, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 75, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 255, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 65535, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 421542400, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -16776961, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 255, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -16777216, ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %s1s2_u = call i32 @llvm.amdgcn.perm(i32 421542475, i32 1550679695, i32 undef)
-  store volatile i32 %s1s2_u, i32* %p
+  store volatile i32 %s1s2_u, ptr %p
   %s1s2_0x00010203 = call i32 @llvm.amdgcn.perm(i32 421542475, i32 1550679695, i32 66051)
-  store volatile i32 %s1s2_0x00010203, i32* %p
+  store volatile i32 %s1s2_0x00010203, ptr %p
   %s1s2_0x01020304 = call i32 @llvm.amdgcn.perm(i32 421542475, i32 1550679695, i32 16909060)
-  store volatile i32 %s1s2_0x01020304, i32* %p
+  store volatile i32 %s1s2_0x01020304, ptr %p
   %s1s2_0x04050607 = call i32 @llvm.amdgcn.perm(i32 421542475, i32 1550679695, i32 67438087)
-  store volatile i32 %s1s2_0x04050607, i32* %p
+  store volatile i32 %s1s2_0x04050607, ptr %p
   %s1s2_0x03020100 = call i32 @llvm.amdgcn.perm(i32 421542475, i32 1550679695, i32 50462976)
-  store volatile i32 %s1s2_0x03020100, i32* %p
+  store volatile i32 %s1s2_0x03020100, ptr %p
   %s1s2_0x07060504 = call i32 @llvm.amdgcn.perm(i32 421542475, i32 1550679695, i32 117835012)
-  store volatile i32 %s1s2_0x07060504, i32* %p
+  store volatile i32 %s1s2_0x07060504, ptr %p
   %s1s2_0x06010500 = call i32 @llvm.amdgcn.perm(i32 421542475, i32 1550679695, i32 100730112)
-  store volatile i32 %s1s2_0x06010500, i32* %p
+  store volatile i32 %s1s2_0x06010500, ptr %p
   %s1s2_0x0c0f0c0f = call i32 @llvm.amdgcn.perm(i32 421542475, i32 1550679695, i32 202312719)
-  store volatile i32 %s1s2_0x0c0f0c0f, i32* %p
+  store volatile i32 %s1s2_0x0c0f0c0f, ptr %p
   %u1u2_0x0c0f0c0f = call i32 @llvm.amdgcn.perm(i32 undef, i32 undef, i32 202312719)
-  store volatile i32 %u1u2_0x0c0f0c0f, i32* %p
+  store volatile i32 %u1u2_0x0c0f0c0f, ptr %p
   %s1s2_0x070d010c = call i32 @llvm.amdgcn.perm(i32 421542475, i32 1550679695, i32 118292748)
-  store volatile i32 %s1s2_0x070d010c, i32* %p
+  store volatile i32 %s1s2_0x070d010c, ptr %p
   %u1u2_0x070d010c = call i32 @llvm.amdgcn.perm(i32 undef, i32 undef, i32 118292748)
-  store volatile i32 %u1u2_0x070d010c, i32* %p
+  store volatile i32 %u1u2_0x070d010c, ptr %p
   %s1s2_0x80818283 = call i32 @llvm.amdgcn.perm(i32 421542475, i32 1550679695, i32 2155971203)
-  store volatile i32 %s1s2_0x80818283, i32* %p
+  store volatile i32 %s1s2_0x80818283, ptr %p
   %u1u2_0x80818283 = call i32 @llvm.amdgcn.perm(i32 undef, i32 undef, i32 2155971203)
-  store volatile i32 %u1u2_0x80818283, i32* %p
+  store volatile i32 %u1u2_0x80818283, ptr %p
   %u1u2_0x0e0e0e0e = call i32 @llvm.amdgcn.perm(i32 undef, i32 undef, i32 235802126)
-  store volatile i32 %u1u2_0x0e0e0e0e, i32* %p
+  store volatile i32 %u1u2_0x0e0e0e0e, ptr %p
   %u1s2_0x07060504 = call i32 @llvm.amdgcn.perm(i32 undef, i32 1550679695, i32 117835012)
-  store volatile i32 %u1s2_0x07060504, i32* %p
+  store volatile i32 %u1s2_0x07060504, ptr %p
   %s1u2_0x07060504 = call i32 @llvm.amdgcn.perm(i32 421542475, i32 undef, i32 117835012)
-  store volatile i32 %s1u2_0x07060504, i32* %p
+  store volatile i32 %s1u2_0x07060504, ptr %p
   %u1s2_0x03020100 = call i32 @llvm.amdgcn.perm(i32 undef, i32 1550679695, i32 50462976)
-  store volatile i32 %u1s2_0x03020100, i32* %p
+  store volatile i32 %u1s2_0x03020100, ptr %p
   %s1u2_0x03020100 = call i32 @llvm.amdgcn.perm(i32 421542475, i32 undef, i32 50462976)
-  store volatile i32 %s1u2_0x03020100, i32* %p
+  store volatile i32 %s1u2_0x03020100, ptr %p
   %u1s2_0x07060500 = call i32 @llvm.amdgcn.perm(i32 undef, i32 1550679695, i32 117835008)
-  store volatile i32 %u1s2_0x07060500, i32* %p
+  store volatile i32 %u1s2_0x07060500, ptr %p
   %u1s2_0x0706050c = call i32 @llvm.amdgcn.perm(i32 undef, i32 1550679695, i32 117835020)
-  store volatile i32 %u1s2_0x0706050c, i32* %p
+  store volatile i32 %u1s2_0x0706050c, ptr %p
   %u1s2_0x0706050d = call i32 @llvm.amdgcn.perm(i32 undef, i32 1550679695, i32 117835021)
-  store volatile i32 %u1s2_0x0706050d, i32* %p
+  store volatile i32 %u1s2_0x0706050d, ptr %p
   %u1s2_0x03020104 = call i32 @llvm.amdgcn.perm(i32 undef, i32 1550679695, i32 50462980)
-  store volatile i32 %u1s2_0x03020104, i32* %p
+  store volatile i32 %u1s2_0x03020104, ptr %p
   %s1u2_0x03020104 = call i32 @llvm.amdgcn.perm(i32 421542475, i32 undef, i32 50462980)
-  store volatile i32 %s1u2_0x03020104, i32* %p
+  store volatile i32 %s1u2_0x03020104, ptr %p
   %s1u2_0x0302010c = call i32 @llvm.amdgcn.perm(i32 421542475, i32 undef, i32 50462988)
-  store volatile i32 %s1u2_0x0302010c, i32* %p
+  store volatile i32 %s1u2_0x0302010c, ptr %p
   %s1u2_0x0302010e = call i32 @llvm.amdgcn.perm(i32 421542475, i32 undef, i32 50462990)
-  store volatile i32 %s1u2_0x0302010e, i32* %p
+  store volatile i32 %s1u2_0x0302010e, ptr %p
   %s1u2_0x03020f0e = call i32 @llvm.amdgcn.perm(i32 421542475, i32 undef, i32 50466574)
-  store volatile i32 %s1u2_0x03020f0e, i32* %p
+  store volatile i32 %s1u2_0x03020f0e, ptr %p
   %s1u2_0x07060500 = call i32 @llvm.amdgcn.perm(i32 421542475, i32 undef, i32 117835008)
-  store volatile i32 %s1u2_0x07060500, i32* %p
+  store volatile i32 %s1u2_0x07060500, ptr %p
   %_0x81000100_0x01008100_0x0b0a0908 = call i32 @llvm.amdgcn.perm(i32 2164261120, i32 16810240, i32 185207048)
-  store volatile i32 %_0x81000100_0x01008100_0x0b0a0908, i32* %p
+  store volatile i32 %_0x81000100_0x01008100_0x0b0a0908, ptr %p
   %_u1_0x01008100_0x0b0a0908 = call i32 @llvm.amdgcn.perm(i32 undef, i32 16810240, i32 185207048)
-  store volatile i32 %_u1_0x01008100_0x0b0a0908, i32* %p
+  store volatile i32 %_u1_0x01008100_0x0b0a0908, ptr %p
   %_0x81000100_u2_0x0b0a0908 = call i32 @llvm.amdgcn.perm(i32 2164261120, i32 undef, i32 185207048)
-  store volatile i32 %_0x81000100_u2_0x0b0a0908, i32* %p
+  store volatile i32 %_0x81000100_u2_0x0b0a0908, ptr %p
   ret void
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/sin.ll b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/sin.ll
index 1c4a3e773d42a..6aeecfff7c031 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/sin.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/AMDGPU/sin.ll
@@ -5,237 +5,237 @@ declare half @llvm.amdgcn.sin.f16(half) #0
 declare float @llvm.amdgcn.sin.f32(float) #0
 declare double @llvm.amdgcn.sin.f64(double) #0
 
-define void @test_f16(half* %p) {
+define void @test_f16(ptr %p) {
 ; CHECK-LABEL: @test_f16(
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P:%.*]], align 2
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH39A8, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xHB9A8, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xHBC00, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P]], align 2
-; CHECK-NEXT:    store volatile half 0xH0000, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P:%.*]], align 2
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH39A8, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xHB9A8, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xHBC00, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH0000, ptr [[P]], align 2
 ; CHECK-NEXT:    [[P1000:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xH63D0)
-; CHECK-NEXT:    store volatile half [[P1000]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half [[P1000]], ptr [[P]], align 2
 ; CHECK-NEXT:    [[N1000:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xHE3D0)
-; CHECK-NEXT:    store volatile half [[N1000]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half [[N1000]], ptr [[P]], align 2
 ; CHECK-NEXT:    [[PINF:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xH7C00)
-; CHECK-NEXT:    store volatile half [[PINF]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half [[PINF]], ptr [[P]], align 2
 ; CHECK-NEXT:    [[NINF:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xHFC00)
-; CHECK-NEXT:    store volatile half [[NINF]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half [[NINF]], ptr [[P]], align 2
 ; CHECK-NEXT:    [[NAN:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xH7E00)
-; CHECK-NEXT:    store volatile half [[NAN]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half [[NAN]], ptr [[P]], align 2
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call half @llvm.amdgcn.sin.f16(half +0.0)
-  store volatile half %p0, half* %p
+  store volatile half %p0, ptr %p
   %n0 = call half @llvm.amdgcn.sin.f16(half -0.0)
-  store volatile half %n0, half* %p
+  store volatile half %n0, ptr %p
   %p0125 = call half @llvm.amdgcn.sin.f16(half +0.125)
-  store volatile half %p0125, half* %p
+  store volatile half %p0125, ptr %p
   %n0125 = call half @llvm.amdgcn.sin.f16(half -0.125)
-  store volatile half %n0125, half* %p
+  store volatile half %n0125, ptr %p
   %p025 = call half @llvm.amdgcn.sin.f16(half +0.25)
-  store volatile half %p025, half* %p
+  store volatile half %p025, ptr %p
   %n025 = call half @llvm.amdgcn.sin.f16(half -0.25)
-  store volatile half %n025, half* %p
+  store volatile half %n025, ptr %p
   %p05 = call half @llvm.amdgcn.sin.f16(half +0.5)
-  store volatile half %p05, half* %p
+  store volatile half %p05, ptr %p
   %n05 = call half @llvm.amdgcn.sin.f16(half -0.5)
-  store volatile half %n05, half* %p
+  store volatile half %n05, ptr %p
   %p1 = call half @llvm.amdgcn.sin.f16(half +1.0)
-  store volatile half %p1, half* %p
+  store volatile half %p1, ptr %p
   %n1 = call half @llvm.amdgcn.sin.f16(half -1.0)
-  store volatile half %n1, half* %p
+  store volatile half %n1, ptr %p
   %p256 = call half @llvm.amdgcn.sin.f16(half +256.0)
-  store volatile half %p256, half* %p
+  store volatile half %p256, ptr %p
   %n256 = call half @llvm.amdgcn.sin.f16(half -256.0)
-  store volatile half %n256, half* %p
+  store volatile half %n256, ptr %p
   %p1000 = call half @llvm.amdgcn.sin.f16(half +1000.0)
-  store volatile half %p1000, half* %p
+  store volatile half %p1000, ptr %p
   %n1000 = call half @llvm.amdgcn.sin.f16(half -1000.0)
-  store volatile half %n1000, half* %p
+  store volatile half %n1000, ptr %p
   %pinf = call half @llvm.amdgcn.sin.f16(half 0xH7C00) ; +inf
-  store volatile half %pinf, half* %p
+  store volatile half %pinf, ptr %p
   %ninf = call half @llvm.amdgcn.sin.f16(half 0xHFC00) ; -inf
-  store volatile half %ninf, half* %p
+  store volatile half %ninf, ptr %p
   %nan = call half @llvm.amdgcn.sin.f16(half 0xH7E00) ; nan
-  store volatile half %nan, half* %p
+  store volatile half %nan, ptr %p
   ret void
 }
 
-define void @test_f32(float* %p) {
+define void @test_f32(ptr %p) {
 ; CHECK-LABEL: @test_f32(
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P:%.*]], align 4
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0x3FE6A09E60000000, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0xBFE6A09E60000000, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float -1.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]], align 4
-; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0x3FE6A09E60000000, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0xBFE6A09E60000000, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float -1.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile float 0.000000e+00, ptr [[P]], align 4
 ; CHECK-NEXT:    [[P1000:%.*]] = call float @llvm.amdgcn.sin.f32(float 1.000000e+03)
-; CHECK-NEXT:    store volatile float [[P1000]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float [[P1000]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[N1000:%.*]] = call float @llvm.amdgcn.sin.f32(float -1.000000e+03)
-; CHECK-NEXT:    store volatile float [[N1000]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float [[N1000]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[PINF:%.*]] = call float @llvm.amdgcn.sin.f32(float 0x7FF0000000000000)
-; CHECK-NEXT:    store volatile float [[PINF]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float [[PINF]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[NINF:%.*]] = call float @llvm.amdgcn.sin.f32(float 0xFFF0000000000000)
-; CHECK-NEXT:    store volatile float [[NINF]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float [[NINF]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[NAN:%.*]] = call float @llvm.amdgcn.sin.f32(float 0x7FF8000000000000)
-; CHECK-NEXT:    store volatile float [[NAN]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float [[NAN]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call float @llvm.amdgcn.sin.f32(float +0.0)
-  store volatile float %p0, float* %p
+  store volatile float %p0, ptr %p
   %n0 = call float @llvm.amdgcn.sin.f32(float -0.0)
-  store volatile float %n0, float* %p
+  store volatile float %n0, ptr %p
   %p0125 = call float @llvm.amdgcn.sin.f32(float +0.125)
-  store volatile float %p0125, float* %p
+  store volatile float %p0125, ptr %p
   %n0125 = call float @llvm.amdgcn.sin.f32(float -0.125)
-  store volatile float %n0125, float* %p
+  store volatile float %n0125, ptr %p
   %p025 = call float @llvm.amdgcn.sin.f32(float +0.25)
-  store volatile float %p025, float* %p
+  store volatile float %p025, ptr %p
   %n025 = call float @llvm.amdgcn.sin.f32(float -0.25)
-  store volatile float %n025, float* %p
+  store volatile float %n025, ptr %p
   %p05 = call float @llvm.amdgcn.sin.f32(float +0.5)
-  store volatile float %p05, float* %p
+  store volatile float %p05, ptr %p
   %n05 = call float @llvm.amdgcn.sin.f32(float -0.5)
-  store volatile float %n05, float* %p
+  store volatile float %n05, ptr %p
   %p1 = call float @llvm.amdgcn.sin.f32(float +1.0)
-  store volatile float %p1, float* %p
+  store volatile float %p1, ptr %p
   %n1 = call float @llvm.amdgcn.sin.f32(float -1.0)
-  store volatile float %n1, float* %p
+  store volatile float %n1, ptr %p
   %p256 = call float @llvm.amdgcn.sin.f32(float +256.0)
-  store volatile float %p256, float* %p
+  store volatile float %p256, ptr %p
   %n256 = call float @llvm.amdgcn.sin.f32(float -256.0)
-  store volatile float %n256, float* %p
+  store volatile float %n256, ptr %p
   %p1000 = call float @llvm.amdgcn.sin.f32(float +1000.0)
-  store volatile float %p1000, float* %p
+  store volatile float %p1000, ptr %p
   %n1000 = call float @llvm.amdgcn.sin.f32(float -1000.0)
-  store volatile float %n1000, float* %p
+  store volatile float %n1000, ptr %p
   %pinf = call float @llvm.amdgcn.sin.f32(float 0x7FF0000000000000) ; +inf
-  store volatile float %pinf, float* %p
+  store volatile float %pinf, ptr %p
   %ninf = call float @llvm.amdgcn.sin.f32(float 0xFFF0000000000000) ; -inf
-  store volatile float %ninf, float* %p
+  store volatile float %ninf, ptr %p
   %nan = call float @llvm.amdgcn.sin.f32(float 0x7FF8000000000000) ; nan
-  store volatile float %nan, float* %p
+  store volatile float %nan, ptr %p
   ret void
 }
 
-define void @test_f64(double* %p) {
+define void @test_f64(ptr %p) {
 ; CHECK-LABEL: @test_f64(
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P:%.*]], align 8
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 0x3FE6A09E667F3B{{.*}}, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 0xBFE6A09E667F3B{{.*}}, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double -1.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P]], align 8
-; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P:%.*]], align 8
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 0x3FE6A09E667F3B{{.*}}, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 0xBFE6A09E667F3B{{.*}}, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double -1.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile double 0.000000e+00, ptr [[P]], align 8
 ; CHECK-NEXT:    [[P1000:%.*]] = call double @llvm.amdgcn.sin.f64(double 1.000000e+03)
-; CHECK-NEXT:    store volatile double [[P1000]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double [[P1000]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[N1000:%.*]] = call double @llvm.amdgcn.sin.f64(double -1.000000e+03)
-; CHECK-NEXT:    store volatile double [[N1000]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double [[N1000]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[PINF:%.*]] = call double @llvm.amdgcn.sin.f64(double 0x7FF0000000000000)
-; CHECK-NEXT:    store volatile double [[PINF]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double [[PINF]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[NINF:%.*]] = call double @llvm.amdgcn.sin.f64(double 0xFFF0000000000000)
-; CHECK-NEXT:    store volatile double [[NINF]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double [[NINF]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[NAN:%.*]] = call double @llvm.amdgcn.sin.f64(double 0x7FF8000000000000)
-; CHECK-NEXT:    store volatile double [[NAN]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double [[NAN]], ptr [[P]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call double @llvm.amdgcn.sin.f64(double +0.0)
-  store volatile double %p0, double* %p
+  store volatile double %p0, ptr %p
   %n0 = call double @llvm.amdgcn.sin.f64(double -0.0)
-  store volatile double %n0, double* %p
+  store volatile double %n0, ptr %p
   %p0125 = call double @llvm.amdgcn.sin.f64(double +0.125)
-  store volatile double %p0125, double* %p
+  store volatile double %p0125, ptr %p
   %n0125 = call double @llvm.amdgcn.sin.f64(double -0.125)
-  store volatile double %n0125, double* %p
+  store volatile double %n0125, ptr %p
   %p025 = call double @llvm.amdgcn.sin.f64(double +0.25)
-  store volatile double %p025, double* %p
+  store volatile double %p025, ptr %p
   %n025 = call double @llvm.amdgcn.sin.f64(double -0.25)
-  store volatile double %n025, double* %p
+  store volatile double %n025, ptr %p
   %p05 = call double @llvm.amdgcn.sin.f64(double +0.5)
-  store volatile double %p05, double* %p
+  store volatile double %p05, ptr %p
   %n05 = call double @llvm.amdgcn.sin.f64(double -0.5)
-  store volatile double %n05, double* %p
+  store volatile double %n05, ptr %p
   %p1 = call double @llvm.amdgcn.sin.f64(double +1.0)
-  store volatile double %p1, double* %p
+  store volatile double %p1, ptr %p
   %n1 = call double @llvm.amdgcn.sin.f64(double -1.0)
-  store volatile double %n1, double* %p
+  store volatile double %n1, ptr %p
   %p256 = call double @llvm.amdgcn.sin.f64(double +256.0)
-  store volatile double %p256, double* %p
+  store volatile double %p256, ptr %p
   %n256 = call double @llvm.amdgcn.sin.f64(double -256.0)
-  store volatile double %n256, double* %p
+  store volatile double %n256, ptr %p
   %p1000 = call double @llvm.amdgcn.sin.f64(double +1000.0)
-  store volatile double %p1000, double* %p
+  store volatile double %p1000, ptr %p
   %n1000 = call double @llvm.amdgcn.sin.f64(double -1000.0)
-  store volatile double %n1000, double* %p
+  store volatile double %n1000, ptr %p
   %pinf = call double @llvm.amdgcn.sin.f64(double 0x7FF0000000000000) ; +inf
-  store volatile double %pinf, double* %p
+  store volatile double %pinf, ptr %p
   %ninf = call double @llvm.amdgcn.sin.f64(double 0xFFF0000000000000) ; -inf
-  store volatile double %ninf, double* %p
+  store volatile double %ninf, ptr %p
   %nan = call double @llvm.amdgcn.sin.f64(double 0x7FF8000000000000) ; nan
-  store volatile double %nan, double* %p
+  store volatile double %nan, ptr %p
   ret void
 }
 
-define void @test_f16_strictfp (half* %p) #1 {
+define void @test_f16_strictfp (ptr %p) #1 {
 ; CHECK-LABEL: @test_f16_strictfp(
 ; CHECK-NEXT:    [[P0:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xH0000) #1
-; CHECK-NEXT:    store volatile half [[P0]], half* [[P:%.*]], align 2
+; CHECK-NEXT:    store volatile half [[P0]], ptr [[P:%.*]], align 2
 ; CHECK-NEXT:    [[P025:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xH3400) #1
-; CHECK-NEXT:    store volatile half [[P025]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half [[P025]], ptr [[P]], align 2
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call half @llvm.amdgcn.sin.f16(half +0.0) #1
-  store volatile half %p0, half* %p
+  store volatile half %p0, ptr %p
   %p025 = call half @llvm.amdgcn.sin.f16(half +0.25) #1
-  store volatile half %p025, half* %p
+  store volatile half %p025, ptr %p
   ret void
 }
 
-define void @test_f32_strictfp(float* %p) #1 {
+define void @test_f32_strictfp(ptr %p) #1 {
 ; CHECK-LABEL: @test_f32_strictfp(
 ; CHECK-NEXT:    [[P0:%.*]] = call float @llvm.amdgcn.sin.f32(float 0.000000e+00) #1
-; CHECK-NEXT:    store volatile float [[P0]], float* [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile float [[P0]], ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    [[P025:%.*]] = call float @llvm.amdgcn.sin.f32(float 2.500000e-01) #1
-; CHECK-NEXT:    store volatile float [[P025]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float [[P025]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call float @llvm.amdgcn.sin.f32(float +0.0) #1
-  store volatile float %p0, float* %p
+  store volatile float %p0, ptr %p
   %p025 = call float @llvm.amdgcn.sin.f32(float +0.25) #1
-  store volatile float %p025, float* %p
+  store volatile float %p025, ptr %p
   ret void
 }
 
-define void @test_f64_strictfp(double* %p) #1 {
+define void @test_f64_strictfp(ptr %p) #1 {
 ; CHECK-LABEL: @test_f64_strictfp(
 ; CHECK-NEXT:    [[P0:%.*]] = call double @llvm.amdgcn.sin.f64(double 0.000000e+00) #1
-; CHECK-NEXT:    store volatile double [[P0]], double* [[P:%.*]], align 8
+; CHECK-NEXT:    store volatile double [[P0]], ptr [[P:%.*]], align 8
 ; CHECK-NEXT:    [[P025:%.*]] = call double @llvm.amdgcn.sin.f64(double 2.500000e-01) #1
-; CHECK-NEXT:    store volatile double [[P025]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double [[P025]], ptr [[P]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %p0 = call double @llvm.amdgcn.sin.f64(double +0.0) #1
-  store volatile double %p0, double* %p
+  store volatile double %p0, ptr %p
   %p025 = call double @llvm.amdgcn.sin.f64(double +0.25) #1
-  store volatile double %p025, double* %p
+  store volatile double %p025, ptr %p
   ret void
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/trunc.ll b/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/trunc.ll
index 9ae059e6f9e8b..8a884928dad63 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/trunc.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/trunc.ll
@@ -13,674 +13,674 @@ declare i64 @llvm.wasm.trunc.unsigned.i64.f32(float)
 declare i64 @llvm.wasm.trunc.signed.i64.f64(double)
 declare i64 @llvm.wasm.trunc.unsigned.i64.f64(double)
 
-define void @test_i32_trunc_f32_s(i32* %p) {
+define void @test_i32_trunc_f32_s(ptr %p) {
 ; CHECK-LABEL: @test_i32_trunc_f32_s(
-; CHECK-NEXT:    store volatile i32 0, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -2, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 2147483520, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -2147483648, i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -2, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 2147483520, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -2147483648, ptr [[P]], align 4
 ; CHECK-NEXT:    [[T14:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0x41E0000000000000)
-; CHECK-NEXT:    store volatile i32 [[T14]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T14]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T15:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0xC1E0000020000000)
-; CHECK-NEXT:    store volatile i32 [[T15]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T15]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T16:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0x7FF0000000000000)
-; CHECK-NEXT:    store volatile i32 [[T16]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T16]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T17:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0xFFF0000000000000)
-; CHECK-NEXT:    store volatile i32 [[T17]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T17]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T18:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0x7FF8000000000000)
-; CHECK-NEXT:    store volatile i32 [[T18]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T18]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T19:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0x7FFA000000000000)
-; CHECK-NEXT:    store volatile i32 [[T19]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T19]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T20:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0xFFF8000000000000)
-; CHECK-NEXT:    store volatile i32 [[T20]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T20]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T21:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0xFFFA000000000000)
-; CHECK-NEXT:    store volatile i32 [[T21]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T21]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %t0 = call i32 @llvm.wasm.trunc.signed.i32.f32(float +0.0)
-  store volatile i32 %t0, i32* %p
+  store volatile i32 %t0, ptr %p
   %t1 = call i32 @llvm.wasm.trunc.signed.i32.f32(float -0.0)
-  store volatile i32 %t1, i32* %p
+  store volatile i32 %t1, ptr %p
   %t2 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0x36a0000000000000); 0x1p-149
-  store volatile i32 %t2, i32* %p
+  store volatile i32 %t2, ptr %p
   %t3 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0xb6a0000000000000); -0x1p-149
-  store volatile i32 %t3, i32* %p
+  store volatile i32 %t3, ptr %p
   %t4 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 1.0)
-  store volatile i32 %t4, i32* %p
+  store volatile i32 %t4, ptr %p
   %t5 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0x3ff19999a0000000); 0x1.19999ap+0
-  store volatile i32 %t5, i32* %p
+  store volatile i32 %t5, ptr %p
   %t6 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 1.5)
-  store volatile i32 %t6, i32* %p
+  store volatile i32 %t6, ptr %p
   %t7 = call i32 @llvm.wasm.trunc.signed.i32.f32(float -1.0)
-  store volatile i32 %t7, i32* %p
+  store volatile i32 %t7, ptr %p
   %t8 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0xbff19999a0000000); -0x1.19999ap+0
-  store volatile i32 %t8, i32* %p
+  store volatile i32 %t8, ptr %p
   %t9 = call i32 @llvm.wasm.trunc.signed.i32.f32(float -1.5)
-  store volatile i32 %t9, i32* %p
+  store volatile i32 %t9, ptr %p
   %t10 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0xbffe666660000000); -1.9
-  store volatile i32 %t10, i32* %p
+  store volatile i32 %t10, ptr %p
   %t11 = call i32 @llvm.wasm.trunc.signed.i32.f32(float -2.0)
-  store volatile i32 %t11, i32* %p
+  store volatile i32 %t11, ptr %p
   %t12 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 2147483520.0)
-  store volatile i32 %t12, i32* %p
+  store volatile i32 %t12, ptr %p
   %t13 = call i32 @llvm.wasm.trunc.signed.i32.f32(float -2147483648.0)
-  store volatile i32 %t13, i32* %p
+  store volatile i32 %t13, ptr %p
   %t14 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 2147483648.0)
-  store volatile i32 %t14, i32* %p
+  store volatile i32 %t14, ptr %p
   %t15 = call i32 @llvm.wasm.trunc.signed.i32.f32(float -2147483904.0)
-  store volatile i32 %t15, i32* %p
+  store volatile i32 %t15, ptr %p
   %t16 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0x7ff0000000000000); inf
-  store volatile i32 %t16, i32* %p
+  store volatile i32 %t16, ptr %p
   %t17 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0xfff0000000000000); -inf
-  store volatile i32 %t17, i32* %p
+  store volatile i32 %t17, ptr %p
   %t18 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0x7ff8000000000000); nan
-  store volatile i32 %t18, i32* %p
+  store volatile i32 %t18, ptr %p
   %t19 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0x7ffa000000000000); nan:0x200000
-  store volatile i32 %t19, i32* %p
+  store volatile i32 %t19, ptr %p
   %t20 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0xfff8000000000000); -nan
-  store volatile i32 %t20, i32* %p
+  store volatile i32 %t20, ptr %p
   %t21 = call i32 @llvm.wasm.trunc.signed.i32.f32(float 0xfffa000000000000); -nan:0x200000
-  store volatile i32 %t21, i32* %p
+  store volatile i32 %t21, ptr %p
   ret void
 }
 
-define void @test_i32_trunc_f32_u(i32* %p) {
+define void @test_i32_trunc_f32_u(ptr %p) {
 ; CHECK-LABEL: @test_i32_trunc_f32_u(
-; CHECK-NEXT:    store volatile i32 0, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 2, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -2147483648, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -256, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 2, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -2147483648, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -256, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
 ; CHECK-NEXT:    [[T13:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0x41F0000000000000)
-; CHECK-NEXT:    store volatile i32 [[T13]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T13]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T14:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float -1.000000e+00)
-; CHECK-NEXT:    store volatile i32 [[T14]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T14]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T15:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0x7FF0000000000000)
-; CHECK-NEXT:    store volatile i32 [[T15]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T15]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T16:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0xFFF0000000000000)
-; CHECK-NEXT:    store volatile i32 [[T16]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T16]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T17:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0x7FF8000000000000)
-; CHECK-NEXT:    store volatile i32 [[T17]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T17]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T18:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0x7FFA000000000000)
-; CHECK-NEXT:    store volatile i32 [[T18]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T18]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T19:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0xFFF8000000000000)
-; CHECK-NEXT:    store volatile i32 [[T19]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T19]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T20:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0xFFFA000000000000)
-; CHECK-NEXT:    store volatile i32 [[T20]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T20]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %t0 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float +0.0)
-  store volatile i32 %t0, i32* %p
+  store volatile i32 %t0, ptr %p
   %t1 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float -0.0)
-  store volatile i32 %t1, i32* %p
+  store volatile i32 %t1, ptr %p
   %t2 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0x36a0000000000000); 0x1p-149
-  store volatile i32 %t2, i32* %p
+  store volatile i32 %t2, ptr %p
   %t3 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0xb6a0000000000000); -0x1p-149
-  store volatile i32 %t3, i32* %p
+  store volatile i32 %t3, ptr %p
   %t4 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 1.0)
-  store volatile i32 %t4, i32* %p
+  store volatile i32 %t4, ptr %p
   %t5 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0x3ff19999a0000000); 0x1.19999ap+0
-  store volatile i32 %t5, i32* %p
+  store volatile i32 %t5, ptr %p
   %t6 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 1.5)
-  store volatile i32 %t6, i32* %p
+  store volatile i32 %t6, ptr %p
   %t7 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0x3ffe666660000000); 1.9
-  store volatile i32 %t7, i32* %p
+  store volatile i32 %t7, ptr %p
   %t8 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 2.0)
-  store volatile i32 %t8, i32* %p
+  store volatile i32 %t8, ptr %p
   %t9 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 2147483648.0)
-  store volatile i32 %t9, i32* %p
+  store volatile i32 %t9, ptr %p
   %t10 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 4294967040.0)
-  store volatile i32 %t10, i32* %p
+  store volatile i32 %t10, ptr %p
   %t11 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0xbfecccccc0000000); -0x1.ccccccp-1
-  store volatile i32 %t11, i32* %p
+  store volatile i32 %t11, ptr %p
   %t12 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0xbfefffffe0000000); -0x1.fffffep-1
-  store volatile i32 %t12, i32* %p
+  store volatile i32 %t12, ptr %p
   %t13 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 4294967296.0)
-  store volatile i32 %t13, i32* %p
+  store volatile i32 %t13, ptr %p
   %t14 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float -1.0)
-  store volatile i32 %t14, i32* %p
+  store volatile i32 %t14, ptr %p
   %t15 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0x7ff0000000000000); inf
-  store volatile i32 %t15, i32* %p
+  store volatile i32 %t15, ptr %p
   %t16 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0xfff0000000000000); -inf
-  store volatile i32 %t16, i32* %p
+  store volatile i32 %t16, ptr %p
   %t17 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0x7ff8000000000000); nan
-  store volatile i32 %t17, i32* %p
+  store volatile i32 %t17, ptr %p
   %t18 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0x7ffa000000000000); nan:0x200000
-  store volatile i32 %t18, i32* %p
+  store volatile i32 %t18, ptr %p
   %t19 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0xfff8000000000000); -nan
-  store volatile i32 %t19, i32* %p
+  store volatile i32 %t19, ptr %p
   %t20 = call i32 @llvm.wasm.trunc.unsigned.i32.f32(float 0xfffa000000000000); -nan:0x200000
-  store volatile i32 %t20, i32* %p
+  store volatile i32 %t20, ptr %p
   ret void
 }
 
-define void @test_i32_trunc_f64_s(i32* %p) {
+define void @test_i32_trunc_f64_s(ptr %p) {
 ; CHECK-LABEL: @test_i32_trunc_f64_s(
-; CHECK-NEXT:    store volatile i32 0, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -2, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 2147483647, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -2147483648, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -2147483648, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 2147483647, i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -2, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 2147483647, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -2147483648, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -2147483648, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 2147483647, ptr [[P]], align 4
 ; CHECK-NEXT:    [[T16:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0x41E0000000000000)
-; CHECK-NEXT:    store volatile i32 [[T16]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T16]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T17:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0xC1E0000000200000)
-; CHECK-NEXT:    store volatile i32 [[T17]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T17]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T18:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0x7FF0000000000000)
-; CHECK-NEXT:    store volatile i32 [[T18]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T18]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T19:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0xFFF0000000000000)
-; CHECK-NEXT:    store volatile i32 [[T19]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T19]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T20:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0x7FF8000000000000)
-; CHECK-NEXT:    store volatile i32 [[T20]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T20]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T21:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0x7FF4000000000000)
-; CHECK-NEXT:    store volatile i32 [[T21]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T21]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T22:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0xFFF8000000000000)
-; CHECK-NEXT:    store volatile i32 [[T22]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T22]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T23:%.*]] = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0x7FF4000000000000)
-; CHECK-NEXT:    store volatile i32 [[T23]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T23]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %t0 = call i32 @llvm.wasm.trunc.signed.i32.f64(double +0.0)
-  store volatile i32 %t0, i32* %p
+  store volatile i32 %t0, ptr %p
   %t1 = call i32 @llvm.wasm.trunc.signed.i32.f64(double -0.0)
-  store volatile i32 %t1, i32* %p
+  store volatile i32 %t1, ptr %p
   %t2 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0x0010000000000001); 0x0.0000000000001p-1022
-  store volatile i32 %t2, i32* %p
+  store volatile i32 %t2, ptr %p
   %t3 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0x8010000000000001); -0x1.0000000000001p-1022
-  store volatile i32 %t3, i32* %p
+  store volatile i32 %t3, ptr %p
   %t4 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 1.0)
-  store volatile i32 %t4, i32* %p
+  store volatile i32 %t4, ptr %p
   %t5 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0x3ff199999999999a); 0x1.199999999999ap+0
-  store volatile i32 %t5, i32* %p
+  store volatile i32 %t5, ptr %p
   %t6 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 1.5)
-  store volatile i32 %t6, i32* %p
+  store volatile i32 %t6, ptr %p
   %t7 = call i32 @llvm.wasm.trunc.signed.i32.f64(double -1.0)
-  store volatile i32 %t7, i32* %p
+  store volatile i32 %t7, ptr %p
   %t8 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0xbff199999999999a); -0x1.199999999999ap+0
-  store volatile i32 %t8, i32* %p
+  store volatile i32 %t8, ptr %p
   %t9 = call i32 @llvm.wasm.trunc.signed.i32.f64(double -1.5)
-  store volatile i32 %t9, i32* %p
+  store volatile i32 %t9, ptr %p
   %t10 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0xbffe666666666666); -1.9
-  store volatile i32 %t10, i32* %p
+  store volatile i32 %t10, ptr %p
   %t11 = call i32 @llvm.wasm.trunc.signed.i32.f64(double -2.0)
-  store volatile i32 %t11, i32* %p
+  store volatile i32 %t11, ptr %p
   %t12 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 2147483647.0)
-  store volatile i32 %t12, i32* %p
+  store volatile i32 %t12, ptr %p
   %t13 = call i32 @llvm.wasm.trunc.signed.i32.f64(double -2147483648.0)
-  store volatile i32 %t13, i32* %p
+  store volatile i32 %t13, ptr %p
   %t14 = call i32 @llvm.wasm.trunc.signed.i32.f64(double -2147483648.9)
-  store volatile i32 %t14, i32* %p
+  store volatile i32 %t14, ptr %p
   %t15 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 2147483647.9)
-  store volatile i32 %t15, i32* %p
+  store volatile i32 %t15, ptr %p
   %t16 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 2147483648.0)
-  store volatile i32 %t16, i32* %p
+  store volatile i32 %t16, ptr %p
   %t17 = call i32 @llvm.wasm.trunc.signed.i32.f64(double -2147483649.0)
-  store volatile i32 %t17, i32* %p
+  store volatile i32 %t17, ptr %p
   %t18 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0x7ff0000000000000); inf
-  store volatile i32 %t18, i32* %p
+  store volatile i32 %t18, ptr %p
   %t19 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0xfff0000000000000); -inf
-  store volatile i32 %t19, i32* %p
+  store volatile i32 %t19, ptr %p
   %t20 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0x7ff8000000000000); nan
-  store volatile i32 %t20, i32* %p
+  store volatile i32 %t20, ptr %p
   %t21 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0x7ff4000000000000); nan:0x4000000000000
-  store volatile i32 %t21, i32* %p
+  store volatile i32 %t21, ptr %p
   %t22 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0xfff8000000000000); -nan
-  store volatile i32 %t22, i32* %p
+  store volatile i32 %t22, ptr %p
   %t23 = call i32 @llvm.wasm.trunc.signed.i32.f64(double 0x7ff4000000000000); nan:0x4000000000000 — NOTE(review): comment said -nan, but the constant is positive NaN; -nan likely needs 0xfff4000000000000
-  store volatile i32 %t23, i32* %p
+  store volatile i32 %t23, ptr %p
   ret void
 }
 
-define void @test_i32_trunc_f64_u(i32* %p) {
+define void @test_i32_trunc_f64_u(ptr %p) {
 ; CHECK-LABEL: @test_i32_trunc_f64_u(
-; CHECK-NEXT:    store volatile i32 0, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 2, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -2147483648, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -1, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 100000000, i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 -1, i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 2, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -2147483648, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -1, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 100000000, ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 -1, ptr [[P]], align 4
 ; CHECK-NEXT:    [[T15:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0x41F0000000000000)
-; CHECK-NEXT:    store volatile i32 [[T15]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T15]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T16:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double -1.000000e+00)
-; CHECK-NEXT:    store volatile i32 [[T16]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T16]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T17:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 1.000000e+16)
-; CHECK-NEXT:    store volatile i32 [[T17]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T17]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T18:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 1.000000e+30)
-; CHECK-NEXT:    store volatile i32 [[T18]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T18]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T19:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0x43E0000000000000)
-; CHECK-NEXT:    store volatile i32 [[T19]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T19]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T20:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0x7FF0000000000000)
-; CHECK-NEXT:    store volatile i32 [[T20]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T20]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T21:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0xFFF0000000000000)
-; CHECK-NEXT:    store volatile i32 [[T21]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T21]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T22:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0x7FF8000000000000)
-; CHECK-NEXT:    store volatile i32 [[T22]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T22]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T23:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0x7FF4000000000000)
-; CHECK-NEXT:    store volatile i32 [[T23]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T23]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T24:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0xFFF8000000000000)
-; CHECK-NEXT:    store volatile i32 [[T24]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T24]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[T25:%.*]] = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0xFFF4000000000000)
-; CHECK-NEXT:    store volatile i32 [[T25]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[T25]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %t0 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double +0.0)
-  store volatile i32 %t0, i32* %p
+  store volatile i32 %t0, ptr %p
   %t1 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double -0.0)
-  store volatile i32 %t1, i32* %p
+  store volatile i32 %t1, ptr %p
   %t2 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0x0010000000000001); 0x1.0000000000001p-1022 (smallest normal + 1 ulp; previous comment wrongly described it as subnormal)
-  store volatile i32 %t2, i32* %p
+  store volatile i32 %t2, ptr %p
   %t3 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0x8010000000000001); -0x1.0000000000001p-1022 (negative smallest normal + 1 ulp; previous comment wrongly described it as subnormal)
-  store volatile i32 %t3, i32* %p
+  store volatile i32 %t3, ptr %p
   %t4 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 1.0)
-  store volatile i32 %t4, i32* %p
+  store volatile i32 %t4, ptr %p
   %t5 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0x3ff199999999999a); 0x1.199999999999ap+0
-  store volatile i32 %t5, i32* %p
+  store volatile i32 %t5, ptr %p
   %t6 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 1.5)
-  store volatile i32 %t6, i32* %p
+  store volatile i32 %t6, ptr %p
   %t7 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0x3ffe666666666666); 1.9
-  store volatile i32 %t7, i32* %p
+  store volatile i32 %t7, ptr %p
   %t8 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 2.0)
-  store volatile i32 %t8, i32* %p
+  store volatile i32 %t8, ptr %p
   %t9 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 2147483648.0)
-  store volatile i32 %t9, i32* %p
+  store volatile i32 %t9, ptr %p
   %t10 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 4294967295.0)
-  store volatile i32 %t10, i32* %p
+  store volatile i32 %t10, ptr %p
   %t11 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0xbfeccccccccccccd); -0x1.ccccccccccccdp-1
-  store volatile i32 %t11, i32* %p
+  store volatile i32 %t11, ptr %p
   %t12 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0xbfefffffffffffff); -0x1.fffffffffffffp-1
-  store volatile i32 %t12, i32* %p
+  store volatile i32 %t12, ptr %p
   %t13 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 100000000.0)
-  store volatile i32 %t13, i32* %p
+  store volatile i32 %t13, ptr %p
   %t14 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 4294967295.9)
-  store volatile i32 %t14, i32* %p
+  store volatile i32 %t14, ptr %p
   %t15 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 4294967296.0)
-  store volatile i32 %t15, i32* %p
+  store volatile i32 %t15, ptr %p
   %t16 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double -1.0)
-  store volatile i32 %t16, i32* %p
+  store volatile i32 %t16, ptr %p
   %t17 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 10000000000000000.0); 1e16
-  store volatile i32 %t17, i32* %p
+  store volatile i32 %t17, ptr %p
   %t18 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 1000000000000000000000000000000.0); 1e30
-  store volatile i32 %t18, i32* %p
+  store volatile i32 %t18, ptr %p
   %t19 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 9223372036854775808.0)
-  store volatile i32 %t19, i32* %p
+  store volatile i32 %t19, ptr %p
   %t20 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0x7ff0000000000000); inf
-  store volatile i32 %t20, i32* %p
+  store volatile i32 %t20, ptr %p
   %t21 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0xfff0000000000000); -inf
-  store volatile i32 %t21, i32* %p
+  store volatile i32 %t21, ptr %p
   %t22 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0x7ff8000000000000); nan
-  store volatile i32 %t22, i32* %p
+  store volatile i32 %t22, ptr %p
   %t23 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0x7ff4000000000000); nan:0x4000000000000
-  store volatile i32 %t23, i32* %p
+  store volatile i32 %t23, ptr %p
   %t24 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0xfff8000000000000); -nan
-  store volatile i32 %t24, i32* %p
+  store volatile i32 %t24, ptr %p
   %t25 = call i32 @llvm.wasm.trunc.unsigned.i32.f64(double 0xfff4000000000000); -nan:0x4000000000000
-  store volatile i32 %t25, i32* %p
+  store volatile i32 %t25, ptr %p
   ret void
 }
 
-define void @test_i64_trunc_f32_s(i64* %p) {
+define void @test_i64_trunc_f32_s(ptr %p) {
 ; CHECK-LABEL: @test_i64_trunc_f32_s(
-; CHECK-NEXT:    store volatile i64 0, i64* [[P:%.*]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -2, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 4294967296, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -4294967296, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 9223371487098961920, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -9223372036854775808, i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P:%.*]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -2, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 4294967296, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -4294967296, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 9223371487098961920, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -9223372036854775808, ptr [[P]], align 8
 ; CHECK-NEXT:    [[T16:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0x43E0000000000000)
-; CHECK-NEXT:    store volatile i64 [[T16]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T16]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T17:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0xC3E0000020000000)
-; CHECK-NEXT:    store volatile i64 [[T17]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T17]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T18:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0x7FF0000000000000)
-; CHECK-NEXT:    store volatile i64 [[T18]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T18]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T19:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0xFFF0000000000000)
-; CHECK-NEXT:    store volatile i64 [[T19]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T19]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T20:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0x7FF8000000000000)
-; CHECK-NEXT:    store volatile i64 [[T20]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T20]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T21:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0x7FFA000000000000)
-; CHECK-NEXT:    store volatile i64 [[T21]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T21]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T22:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0xFFF8000000000000)
-; CHECK-NEXT:    store volatile i64 [[T22]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T22]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T23:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0xFFFA000000000000)
-; CHECK-NEXT:    store volatile i64 [[T23]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T23]], ptr [[P]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %t0 = call i64 @llvm.wasm.trunc.signed.i64.f32(float +0.0)
-  store volatile i64 %t0, i64* %p
+  store volatile i64 %t0, ptr %p
   %t1 = call i64 @llvm.wasm.trunc.signed.i64.f32(float -0.0)
-  store volatile i64 %t1, i64* %p
+  store volatile i64 %t1, ptr %p
   %t2 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0x36a0000000000000); 0x1p-149
-  store volatile i64 %t2, i64* %p
+  store volatile i64 %t2, ptr %p
   %t3 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0xb6a0000000000000); -0x1p-149
-  store volatile i64 %t3, i64* %p
+  store volatile i64 %t3, ptr %p
   %t4 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 1.0)
-  store volatile i64 %t4, i64* %p
+  store volatile i64 %t4, ptr %p
   %t5 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0x3ff19999a0000000); 0x1.19999ap+0
-  store volatile i64 %t5, i64* %p
+  store volatile i64 %t5, ptr %p
   %t6 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 1.5)
-  store volatile i64 %t6, i64* %p
+  store volatile i64 %t6, ptr %p
   %t7 = call i64 @llvm.wasm.trunc.signed.i64.f32(float -1.0)
-  store volatile i64 %t7, i64* %p
+  store volatile i64 %t7, ptr %p
   %t8 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0xbff19999a0000000); -0x1.19999ap+0
-  store volatile i64 %t8, i64* %p
+  store volatile i64 %t8, ptr %p
   %t9 = call i64 @llvm.wasm.trunc.signed.i64.f32(float -1.5)
-  store volatile i64 %t9, i64* %p
+  store volatile i64 %t9, ptr %p
   %t10 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0xbffe666660000000); -1.9
-  store volatile i64 %t10, i64* %p
+  store volatile i64 %t10, ptr %p
   %t11 = call i64 @llvm.wasm.trunc.signed.i64.f32(float -2.0)
-  store volatile i64 %t11, i64* %p
+  store volatile i64 %t11, ptr %p
   %t12 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 4294967296.0)
-  store volatile i64 %t12, i64* %p
+  store volatile i64 %t12, ptr %p
   %t13 = call i64 @llvm.wasm.trunc.signed.i64.f32(float -4294967296.0)
-  store volatile i64 %t13, i64* %p
+  store volatile i64 %t13, ptr %p
   %t14 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 9223371487098961920.0)
-  store volatile i64 %t14, i64* %p
+  store volatile i64 %t14, ptr %p
   %t15 = call i64 @llvm.wasm.trunc.signed.i64.f32(float -9223372036854775808.0)
-  store volatile i64 %t15, i64* %p
+  store volatile i64 %t15, ptr %p
   %t16 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 9223372036854775808.0)
-  store volatile i64 %t16, i64* %p
+  store volatile i64 %t16, ptr %p
   %t17 = call i64 @llvm.wasm.trunc.signed.i64.f32(float -9223373136366403584.0)
-  store volatile i64 %t17, i64* %p
+  store volatile i64 %t17, ptr %p
   %t18 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0x7ff0000000000000); inf
-  store volatile i64 %t18, i64* %p
+  store volatile i64 %t18, ptr %p
   %t19 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0xfff0000000000000); -inf
-  store volatile i64 %t19, i64* %p
+  store volatile i64 %t19, ptr %p
   %t20 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0x7ff8000000000000); nan
-  store volatile i64 %t20, i64* %p
+  store volatile i64 %t20, ptr %p
   %t21 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0x7ffa000000000000); nan:0x200000
-  store volatile i64 %t21, i64* %p
+  store volatile i64 %t21, ptr %p
   %t22 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0xfff8000000000000); -nan
-  store volatile i64 %t22, i64* %p
+  store volatile i64 %t22, ptr %p
   %t23 = call i64 @llvm.wasm.trunc.signed.i64.f32(float 0xfffa000000000000); -nan:0x200000
-  store volatile i64 %t23, i64* %p
+  store volatile i64 %t23, ptr %p
   ret void
 }
 
-define void @test_i64_trunc_f32_u(i64* %p) {
+define void @test_i64_trunc_f32_u(ptr %p) {
 ; CHECK-LABEL: @test_i64_trunc_f32_u(
-; CHECK-NEXT:    store volatile i64 0, i64* [[P:%.*]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 4294967296, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -1099511627776, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P:%.*]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 4294967296, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -1099511627776, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
 ; CHECK-NEXT:    [[T11:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0x43F0000000000000)
-; CHECK-NEXT:    store volatile i64 [[T11]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T11]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T12:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float -1.000000e+00)
-; CHECK-NEXT:    store volatile i64 [[T12]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T12]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T13:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0x7FF0000000000000)
-; CHECK-NEXT:    store volatile i64 [[T13]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T13]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T14:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0xFFF0000000000000)
-; CHECK-NEXT:    store volatile i64 [[T14]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T14]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T15:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0x7FF8000000000000)
-; CHECK-NEXT:    store volatile i64 [[T15]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T15]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T16:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0x7FFA000000000000)
-; CHECK-NEXT:    store volatile i64 [[T16]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T16]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T17:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0xFFF8000000000000)
-; CHECK-NEXT:    store volatile i64 [[T17]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T17]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T18:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0xFFFA000000000000)
-; CHECK-NEXT:    store volatile i64 [[T18]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T18]], ptr [[P]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %t0 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float +0.0)
-  store volatile i64 %t0, i64* %p
+  store volatile i64 %t0, ptr %p
   %t1 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float -0.0)
-  store volatile i64 %t1, i64* %p
+  store volatile i64 %t1, ptr %p
   %t2 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0x36a0000000000000); 0x1p-149
-  store volatile i64 %t2, i64* %p
+  store volatile i64 %t2, ptr %p
   %t3 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0xb6a0000000000000); -0x1p-149
-  store volatile i64 %t3, i64* %p
+  store volatile i64 %t3, ptr %p
   %t4 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 1.0)
-  store volatile i64 %t4, i64* %p
+  store volatile i64 %t4, ptr %p
   %t5 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0x3ff19999a0000000); 0x1.19999ap+0
-  store volatile i64 %t5, i64* %p
+  store volatile i64 %t5, ptr %p
   %t6 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 1.5)
-  store volatile i64 %t6, i64* %p
+  store volatile i64 %t6, ptr %p
   %t7 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 4294967296.0)
-  store volatile i64 %t7, i64* %p
+  store volatile i64 %t7, ptr %p
   %t8 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 18446742974197923840.0)
-  store volatile i64 %t8, i64* %p
+  store volatile i64 %t8, ptr %p
   %t9 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0xbfecccccc0000000); -0x1.ccccccp-1
-  store volatile i64 %t9, i64* %p
+  store volatile i64 %t9, ptr %p
   %t10 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0xbfefffffe0000000); -0x1.fffffep-1
-  store volatile i64 %t10, i64* %p
+  store volatile i64 %t10, ptr %p
   %t11 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 18446744073709551616.0)
-  store volatile i64 %t11, i64* %p
+  store volatile i64 %t11, ptr %p
   %t12 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float -1.0)
-  store volatile i64 %t12, i64* %p
+  store volatile i64 %t12, ptr %p
   %t13 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0x7ff0000000000000); inf
-  store volatile i64 %t13, i64* %p
+  store volatile i64 %t13, ptr %p
   %t14 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0xfff0000000000000); -inf
-  store volatile i64 %t14, i64* %p
+  store volatile i64 %t14, ptr %p
   %t15 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0x7ff8000000000000); nan
-  store volatile i64 %t15, i64* %p
+  store volatile i64 %t15, ptr %p
   %t16 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0x7ffa000000000000); nan:0x200000
-  store volatile i64 %t16, i64* %p
+  store volatile i64 %t16, ptr %p
   %t17 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0xfff8000000000000); -nan
-  store volatile i64 %t17, i64* %p
+  store volatile i64 %t17, ptr %p
   %t18 = call i64 @llvm.wasm.trunc.unsigned.i64.f32(float 0xfffa000000000000); -nan:0x200000
-  store volatile i64 %t18, i64* %p
+  store volatile i64 %t18, ptr %p
   ret void
 }
 
-define void @test_i64_trunc_f64_s(i64* %p) {
+define void @test_i64_trunc_f64_s(ptr %p) {
 ; CHECK-LABEL: @test_i64_trunc_f64_s(
-; CHECK-NEXT:    store volatile i64 0, i64* [[P:%.*]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -2, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 4294967296, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -4294967296, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 9223372036854774784, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -9223372036854775808, i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P:%.*]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -2, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 4294967296, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -4294967296, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 9223372036854774784, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -9223372036854775808, ptr [[P]], align 8
 ; CHECK-NEXT:    [[T16:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0x43E0000000000000)
-; CHECK-NEXT:    store volatile i64 [[T16]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T16]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T17:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0xC3E0000000000001)
-; CHECK-NEXT:    store volatile i64 [[T17]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T17]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T18:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0x7FF0000000000000)
-; CHECK-NEXT:    store volatile i64 [[T18]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T18]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T19:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0xFFF0000000000000)
-; CHECK-NEXT:    store volatile i64 [[T19]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T19]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T20:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0x7FF8000000000000)
-; CHECK-NEXT:    store volatile i64 [[T20]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T20]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T21:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0x7FF4000000000000)
-; CHECK-NEXT:    store volatile i64 [[T21]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T21]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T22:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0xFFF8000000000000)
-; CHECK-NEXT:    store volatile i64 [[T22]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T22]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T23:%.*]] = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0x7FF4000000000000)
-; CHECK-NEXT:    store volatile i64 [[T23]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T23]], ptr [[P]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %t0 = call i64 @llvm.wasm.trunc.signed.i64.f64(double +0.0)
-  store volatile i64 %t0, i64* %p
+  store volatile i64 %t0, ptr %p
   %t1 = call i64 @llvm.wasm.trunc.signed.i64.f64(double -0.0)
-  store volatile i64 %t1, i64* %p
+  store volatile i64 %t1, ptr %p
   %t2 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0x0010000000000001); 0x1.0000000000001p-1022 (smallest normal + 1 ulp; previous comment wrongly described it as subnormal)
-  store volatile i64 %t2, i64* %p
+  store volatile i64 %t2, ptr %p
   %t3 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0x8010000000000001); -0x1.0000000000001p-1022
-  store volatile i64 %t3, i64* %p
+  store volatile i64 %t3, ptr %p
   %t4 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 1.0)
-  store volatile i64 %t4, i64* %p
+  store volatile i64 %t4, ptr %p
   %t5 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0x3ff199999999999a); 0x1.199999999999ap+0
-  store volatile i64 %t5, i64* %p
+  store volatile i64 %t5, ptr %p
   %t6 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 1.5)
-  store volatile i64 %t6, i64* %p
+  store volatile i64 %t6, ptr %p
   %t7 = call i64 @llvm.wasm.trunc.signed.i64.f64(double -1.0)
-  store volatile i64 %t7, i64* %p
+  store volatile i64 %t7, ptr %p
   %t8 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0xbff199999999999a); -0x1.199999999999ap+0
-  store volatile i64 %t8, i64* %p
+  store volatile i64 %t8, ptr %p
   %t9 = call i64 @llvm.wasm.trunc.signed.i64.f64(double -1.5)
-  store volatile i64 %t9, i64* %p
+  store volatile i64 %t9, ptr %p
   %t10 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0xbffe666666666666); -1.9
-  store volatile i64 %t10, i64* %p
+  store volatile i64 %t10, ptr %p
   %t11 = call i64 @llvm.wasm.trunc.signed.i64.f64(double -2.0)
-  store volatile i64 %t11, i64* %p
+  store volatile i64 %t11, ptr %p
   %t12 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 4294967296.0)
-  store volatile i64 %t12, i64* %p
+  store volatile i64 %t12, ptr %p
   %t13 = call i64 @llvm.wasm.trunc.signed.i64.f64(double -4294967296.0)
-  store volatile i64 %t13, i64* %p
+  store volatile i64 %t13, ptr %p
   %t14 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 9223372036854774784.0)
-  store volatile i64 %t14, i64* %p
+  store volatile i64 %t14, ptr %p
   %t15 = call i64 @llvm.wasm.trunc.signed.i64.f64(double -9223372036854775808.0)
-  store volatile i64 %t15, i64* %p
+  store volatile i64 %t15, ptr %p
   %t16 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 9223372036854775808.0)
-  store volatile i64 %t16, i64* %p
+  store volatile i64 %t16, ptr %p
   %t17 = call i64 @llvm.wasm.trunc.signed.i64.f64(double -9223372036854777856.0)
-  store volatile i64 %t17, i64* %p
+  store volatile i64 %t17, ptr %p
   %t18 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0x7ff0000000000000); inf
-  store volatile i64 %t18, i64* %p
+  store volatile i64 %t18, ptr %p
   %t19 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0xfff0000000000000); -inf
-  store volatile i64 %t19, i64* %p
+  store volatile i64 %t19, ptr %p
   %t20 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0x7ff8000000000000); nan
-  store volatile i64 %t20, i64* %p
+  store volatile i64 %t20, ptr %p
   %t21 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0x7ff4000000000000); nan:0x4000000000000
-  store volatile i64 %t21, i64* %p
+  store volatile i64 %t21, ptr %p
   %t22 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0xfff8000000000000); -nan
-  store volatile i64 %t22, i64* %p
+  store volatile i64 %t22, ptr %p
   %t23 = call i64 @llvm.wasm.trunc.signed.i64.f64(double 0x7ff4000000000000); nan:0x4000000000000 — NOTE(review): comment said -nan, but the constant is positive NaN; -nan likely needs 0xfff4000000000000
-  store volatile i64 %t23, i64* %p
+  store volatile i64 %t23, ptr %p
   ret void
 }
 
-define void @test_i64_trunc_f64_u(i64* %p) {
+define void @test_i64_trunc_f64_u(ptr %p) {
 ; CHECK-LABEL: @test_i64_trunc_f64_u(
-; CHECK-NEXT:    store volatile i64 0, i64* [[P:%.*]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 1, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 4294967295, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 4294967296, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -2048, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 0, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 100000000, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 10000000000000000, i64* [[P]], align 8
-; CHECK-NEXT:    store volatile i64 -9223372036854775808, i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P:%.*]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 1, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 4294967295, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 4294967296, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -2048, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 0, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 100000000, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 10000000000000000, ptr [[P]], align 8
+; CHECK-NEXT:    store volatile i64 -9223372036854775808, ptr [[P]], align 8
 ; CHECK-NEXT:    [[T15:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0x43F0000000000000)
-; CHECK-NEXT:    store volatile i64 [[T15]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T15]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T16:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double -1.000000e+00)
-; CHECK-NEXT:    store volatile i64 [[T16]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T16]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T17:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0x7FF0000000000000)
-; CHECK-NEXT:    store volatile i64 [[T17]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T17]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T18:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0xFFF0000000000000)
-; CHECK-NEXT:    store volatile i64 [[T18]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T18]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T19:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0x7FF8000000000000)
-; CHECK-NEXT:    store volatile i64 [[T19]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T19]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T20:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0x7FF4000000000000)
-; CHECK-NEXT:    store volatile i64 [[T20]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T20]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T21:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0xFFF8000000000000)
-; CHECK-NEXT:    store volatile i64 [[T21]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T21]], ptr [[P]], align 8
 ; CHECK-NEXT:    [[T22:%.*]] = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0xFFF4000000000000)
-; CHECK-NEXT:    store volatile i64 [[T22]], i64* [[P]], align 8
+; CHECK-NEXT:    store volatile i64 [[T22]], ptr [[P]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %t0 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double +0.0)
-  store volatile i64 %t0, i64* %p
+  store volatile i64 %t0, ptr %p
   %t1 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double -0.0)
-  store volatile i64 %t1, i64* %p
+  store volatile i64 %t1, ptr %p
   %t2 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0x0010000000000001); 0x0.0000000000001p-1022
-  store volatile i64 %t2, i64* %p
+  store volatile i64 %t2, ptr %p
   %t3 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0x8010000000000001); -0x0.0000000000001p-1022
-  store volatile i64 %t3, i64* %p
+  store volatile i64 %t3, ptr %p
   %t4 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 1.0)
-  store volatile i64 %t4, i64* %p
+  store volatile i64 %t4, ptr %p
   %t5 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0x3ff199999999999a); 0x1.199999999999ap+0
-  store volatile i64 %t5, i64* %p
+  store volatile i64 %t5, ptr %p
   %t6 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 1.5)
-  store volatile i64 %t6, i64* %p
+  store volatile i64 %t6, ptr %p
   %t7 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 4294967295.0)
-  store volatile i64 %t7, i64* %p
+  store volatile i64 %t7, ptr %p
   %t8 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 4294967296.0)
-  store volatile i64 %t8, i64* %p
+  store volatile i64 %t8, ptr %p
   %t9 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 18446744073709549568.0)
-  store volatile i64 %t9, i64* %p
+  store volatile i64 %t9, ptr %p
   %t10 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0xbfeccccccccccccd); -0x1.ccccccccccccdp-1
-  store volatile i64 %t10, i64* %p
+  store volatile i64 %t10, ptr %p
   %t11 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0xbfefffffffffffff); -0x1.fffffffffffffp-1
-  store volatile i64 %t11, i64* %p
+  store volatile i64 %t11, ptr %p
   %t12 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 100000000.0); 1e8
-  store volatile i64 %t12, i64* %p
+  store volatile i64 %t12, ptr %p
   %t13 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 10000000000000000.0); 1e16
-  store volatile i64 %t13, i64* %p
+  store volatile i64 %t13, ptr %p
   %t14 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 9223372036854775808.0);
-  store volatile i64 %t14, i64* %p
+  store volatile i64 %t14, ptr %p
   %t15 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 18446744073709551616.0)
-  store volatile i64 %t15, i64* %p
+  store volatile i64 %t15, ptr %p
   %t16 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double -1.0)
-  store volatile i64 %t16, i64* %p
+  store volatile i64 %t16, ptr %p
   %t17 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0x7ff0000000000000); inf
-  store volatile i64 %t17, i64* %p
+  store volatile i64 %t17, ptr %p
   %t18 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0xfff0000000000000); -inf
-  store volatile i64 %t18, i64* %p
+  store volatile i64 %t18, ptr %p
   %t19 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0x7ff8000000000000); nan
-  store volatile i64 %t19, i64* %p
+  store volatile i64 %t19, ptr %p
   %t20 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0x7ff4000000000000); nan:0x4000000000000
-  store volatile i64 %t20, i64* %p
+  store volatile i64 %t20, ptr %p
   %t21 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0xfff8000000000000); -nan
-  store volatile i64 %t21, i64* %p
+  store volatile i64 %t21, ptr %p
   %t22 = call i64 @llvm.wasm.trunc.unsigned.i64.f64(double 0xfff4000000000000); -nan:0x4000000000000
-  store volatile i64 %t22, i64* %p
+  store volatile i64 %t22, ptr %p
   ret void
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/allones.ll b/llvm/test/Transforms/InstSimplify/ConstProp/allones.ll
index 2dc381d0949bd..9d170dedd4be3 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/allones.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/allones.ll
@@ -8,10 +8,10 @@ target triple = "armv7-unknown-linux-gnueabi"
 
 define i32 @allones_struct() {
 ; CHECK-LABEL: @allones_struct()
-; CHECK-NEXT:    %1 = load [1 x i32], [1 x i32]* bitcast (%struct.anon* @onesstruct to [1 x i32]*), align 4
+; CHECK-NEXT:    %1 = load [1 x i32], ptr @onesstruct, align 4
 ; CHECK-NEXT:    %2 = extractvalue [1 x i32] %1, 0
 ; CHECK-NEXT:    ret i32 %2
-  %1 = load [1 x i32], [1 x i32]* bitcast (%struct.anon* @onesstruct to [1 x i32]*), align 4
+  %1 = load [1 x i32], ptr @onesstruct, align 4
   %2 = extractvalue [1 x i32] %1, 0
   ret i32 %2
 }
@@ -19,28 +19,28 @@ define i32 @allones_struct() {
 define i32 @allones_int() {
 ; CHECK-LABEL: @allones_int()
 ; CHECK-NEXT:    ret i32 -1
-  %1 = load i32, i32* bitcast (%struct.anon* @onesstruct to i32*), align 4
+  %1 = load i32, ptr @onesstruct, align 4
   ret i32 %1
 }
 
-define i32* @allones_ptr() {
+define ptr @allones_ptr() {
 ; CHECK-LABEL: @allones_ptr()
-; CHECK-NEXT:    ret i32* inttoptr (i32 -1 to i32*)
-  %1 = load i32*, i32** bitcast (%struct.anon* @onesstruct to i32**), align 4
-  ret i32* %1
+; CHECK-NEXT:    ret ptr inttoptr (i32 -1 to ptr)
+  %1 = load ptr, ptr @onesstruct, align 4
+  ret ptr %1
 }
 
-define i32 addrspace(1)* @allones_ptr1() {
+define ptr addrspace(1) @allones_ptr1() {
 ; CHECK-LABEL: @allones_ptr1()
-; CHECK-NEXT:    ret i32 addrspace(1)* inttoptr (i32 -1 to i32 addrspace(1)*)
-  %1 = load i32 addrspace(1)*, i32 addrspace(1)** bitcast (%struct.anon* @onesstruct to i32 addrspace(1)**), align 4
-  ret i32 addrspace(1)* %1
+; CHECK-NEXT:    ret ptr addrspace(1) inttoptr (i32 -1 to ptr addrspace(1))
+  %1 = load ptr addrspace(1), ptr @onesstruct, align 4
+  ret ptr addrspace(1) %1
 }
 
-define i32 addrspace(2)* @allones_ptr2() {
+define ptr addrspace(2) @allones_ptr2() {
 ; CHECK-LABEL: @allones_ptr2()
-; CHECK-NEXT:    %1 = load i32 addrspace(2)*, i32 addrspace(2)** bitcast (%struct.anon* @onesstruct to i32 addrspace(2)**), align 4
-; CHECK-NEXT:    ret i32 addrspace(2)* %1
-  %1 = load i32 addrspace(2)*, i32 addrspace(2)** bitcast (%struct.anon* @onesstruct to i32 addrspace(2)**), align 4
-  ret i32 addrspace(2)* %1
+; CHECK-NEXT:    %1 = load ptr addrspace(2), ptr @onesstruct, align 4
+; CHECK-NEXT:    ret ptr addrspace(2) %1
+  %1 = load ptr addrspace(2), ptr @onesstruct, align 4
+  ret ptr addrspace(2) %1
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/basictest.ll b/llvm/test/Transforms/InstSimplify/ConstProp/basictest.ll
index d4181ee5d9660..5c8f74705ba63 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/basictest.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/basictest.ll
@@ -24,9 +24,9 @@ BB3:
 
 
 ; PR6197
-define i1 @test2(i8* %f) nounwind {
+define i1 @test2(ptr %f) nounwind {
 entry:
-  %V = icmp ne i8* blockaddress(@test2, %bb), null
+  %V = icmp ne ptr blockaddress(@test2, %bb), null
   br label %bb
 bb:
   ret i1 %V

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/bitcast.ll b/llvm/test/Transforms/InstSimplify/ConstProp/bitcast.ll
index 8445a0562e78b..05160b9ad72ab 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/bitcast.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/bitcast.ll
@@ -17,9 +17,9 @@ define <1 x i64> @test1() {
 
 define i1 @bad_icmp_constexpr_bitcast() {
 ; CHECK-LABEL: @bad_icmp_constexpr_bitcast(
-; CHECK-NEXT:    ret i1 icmp eq (i32 ptrtoint (i16* @a to i32), i32 bitcast (float fadd (float bitcast (i32 ptrtoint (i16* @b to i32) to float), float 2.000000e+00) to i32))
+; CHECK-NEXT:    ret i1 icmp eq (i32 ptrtoint (ptr @a to i32), i32 bitcast (float fadd (float bitcast (i32 ptrtoint (ptr @b to i32) to float), float 2.000000e+00) to i32))
 ;
-  %cmp = icmp eq i32 ptrtoint (i16* @a to i32), bitcast (float fadd (float bitcast (i32 ptrtoint (i16* @b to i32) to float), float 2.0) to i32)
+  %cmp = icmp eq i32 ptrtoint (ptr @a to i32), bitcast (float fadd (float bitcast (i32 ptrtoint (ptr @b to i32) to float), float 2.0) to i32)
   ret i1 %cmp
 }
 
@@ -30,9 +30,9 @@ define i1 @bad_icmp_constexpr_bitcast() {
 
 define i1 @bad_fcmp_constexpr_bitcast() {
 ; CHECK-LABEL: @bad_fcmp_constexpr_bitcast(
-; CHECK-NEXT:    ret i1 fcmp oeq (float bitcast (i32 ptrtoint (i16* @c to i32) to float), float bitcast (i32 add (i32 ptrtoint (i16* @d to i32), i32 2) to float))
+; CHECK-NEXT:    ret i1 fcmp oeq (float bitcast (i32 ptrtoint (ptr @c to i32) to float), float bitcast (i32 add (i32 ptrtoint (ptr @d to i32), i32 2) to float))
 ;
-  %cmp = fcmp oeq float bitcast (i32 ptrtoint (i16* @c to i32) to float), bitcast (i32 add (i32 ptrtoint (i16* @d to i32), i32 2) to float)
+  %cmp = fcmp oeq float bitcast (i32 ptrtoint (ptr @c to i32) to float), bitcast (i32 add (i32 ptrtoint (ptr @d to i32), i32 2) to float)
   ret i1 %cmp
 }
 
@@ -40,9 +40,9 @@ define i1 @bad_fcmp_constexpr_bitcast() {
 
 define i1 @fcmp_constexpr_oeq(float %conv) {
 ; CHECK-LABEL: @fcmp_constexpr_oeq(
-; CHECK-NEXT:    ret i1 fcmp oeq (float bitcast (i32 ptrtoint (i16* @a to i32) to float), float bitcast (i32 ptrtoint (i16* @a to i32) to float))
+; CHECK-NEXT:    ret i1 fcmp oeq (float bitcast (i32 ptrtoint (ptr @a to i32) to float), float bitcast (i32 ptrtoint (ptr @a to i32) to float))
 ;
-  %cmp = fcmp oeq float bitcast (i32 ptrtoint (i16* @a to i32) to float), bitcast (i32 ptrtoint (i16* @a to i32) to float)
+  %cmp = fcmp oeq float bitcast (i32 ptrtoint (ptr @a to i32) to float), bitcast (i32 ptrtoint (ptr @a to i32) to float)
   ret i1 %cmp
 }
 
@@ -50,9 +50,9 @@ define i1 @fcmp_constexpr_oeq(float %conv) {
 
 define i1 @fcmp_constexpr_une(float %conv) {
 ; CHECK-LABEL: @fcmp_constexpr_une(
-; CHECK-NEXT:    ret i1 fcmp une (float bitcast (i32 ptrtoint (i16* @a to i32) to float), float bitcast (i32 ptrtoint (i16* @a to i32) to float))
+; CHECK-NEXT:    ret i1 fcmp une (float bitcast (i32 ptrtoint (ptr @a to i32) to float), float bitcast (i32 ptrtoint (ptr @a to i32) to float))
 ;
-  %cmp = fcmp une float bitcast (i32 ptrtoint (i16* @a to i32) to float), bitcast (i32 ptrtoint (i16* @a to i32) to float)
+  %cmp = fcmp une float bitcast (i32 ptrtoint (ptr @a to i32) to float), bitcast (i32 ptrtoint (ptr @a to i32) to float)
   ret i1 %cmp
 }
 
@@ -60,7 +60,7 @@ define i1 @fcmp_constexpr_ueq(float %conv) {
 ; CHECK-LABEL: @fcmp_constexpr_ueq(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %cmp = fcmp ueq float bitcast (i32 ptrtoint (i16* @a to i32) to float), bitcast (i32 ptrtoint (i16* @a to i32) to float)
+  %cmp = fcmp ueq float bitcast (i32 ptrtoint (ptr @a to i32) to float), bitcast (i32 ptrtoint (ptr @a to i32) to float)
   ret i1 %cmp
 }
 
@@ -68,25 +68,24 @@ define i1 @fcmp_constexpr_one(float %conv) {
 ; CHECK-LABEL: @fcmp_constexpr_one(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = fcmp one float bitcast (i32 ptrtoint (i16* @a to i32) to float), bitcast (i32 ptrtoint (i16* @a to i32) to float)
+  %cmp = fcmp one float bitcast (i32 ptrtoint (ptr @a to i32) to float), bitcast (i32 ptrtoint (ptr @a to i32) to float)
   ret i1 %cmp
 }
 
 %T = type { i8 }
 @G = external global %T
 
-define i8* @bitcast_to_gep() {
+define ptr @bitcast_to_gep() {
 ; CHECK-LABEL: @bitcast_to_gep(
-; CHECK-NEXT:    ret i8* getelementptr inbounds (%T, %T* @G, i32 0, i32 0)
+; CHECK-NEXT:    ret ptr @G
 ;
-  %p = bitcast %T* @G to i8*
-  ret i8* %p
+  ret ptr @G
 }
 
-define i8 addrspace(1)* @addrspacecast_to_gep() {
+define ptr addrspace(1) @addrspacecast_to_gep() {
 ; CHECK-LABEL: @addrspacecast_to_gep(
-; CHECK-NEXT:    ret i8 addrspace(1)* addrspacecast (i8* getelementptr inbounds (%T, %T* @G, i32 0, i32 0) to i8 addrspace(1)*)
+; CHECK-NEXT:    ret ptr addrspace(1) addrspacecast (ptr @G to ptr addrspace(1))
 ;
-  %p = addrspacecast %T* @G to i8 addrspace(1)*
-  ret i8 addrspace(1)* %p
+  %p = addrspacecast ptr @G to ptr addrspace(1)
+  ret ptr addrspace(1) %p
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/calls-math-finite.ll b/llvm/test/Transforms/InstSimplify/ConstProp/calls-math-finite.ll
index 7744ca7d32409..eac1beb3ba05e 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/calls-math-finite.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/calls-math-finite.ll
@@ -36,26 +36,26 @@ define void @T() {
 ; CHECK-LABEL: @T(
 ; CHECK-NEXT:    [[SLOT:%.*]] = alloca double
 ; CHECK-NEXT:    [[SLOTF:%.*]] = alloca float
-; CHECK-NEXT:    store double 0.000000e+00, double* [[SLOT]]
-; CHECK-NEXT:    store double 0x3FF921FB54442D18, double* [[SLOT]]
-; CHECK-NEXT:    store double 0x3FE4978FA3269EE1, double* [[SLOT]]
-; CHECK-NEXT:    store double 0x402422A497D6185E, double* [[SLOT]]
-; CHECK-NEXT:    store double 0x403415E5BF6FB106, double* [[SLOT]]
-; CHECK-NEXT:    store double 8.000000e+00, double* [[SLOT]]
-; CHECK-NEXT:    store double 0x3FF193EA7AAD030{{[AB]}}, double* [[SLOT]]
-; CHECK-NEXT:    store double 0x3FDE8927964FD5FD, double* [[SLOT]]
-; CHECK-NEXT:    store double 1.000000e+00, double* [[SLOT]]
-; CHECK-NEXT:    store double 0x40240926E70949AE, double* [[SLOT]]
-; CHECK-NEXT:    store float 0.000000e+00, float* [[SLOTF]]
-; CHECK-NEXT:    store float 0x3FF921FB60000000, float* [[SLOTF]]
-; CHECK-NEXT:    store float 0x3FE4978FA0000000, float* [[SLOTF]]
-; CHECK-NEXT:    store float 0x402422A4A0000000, float* [[SLOTF]]
-; CHECK-NEXT:    store float 0x403415E5C0000000, float* [[SLOTF]]
-; CHECK-NEXT:    store float 8.000000e+00, float* [[SLOTF]]
-; CHECK-NEXT:    store float 0x3FF193EA80000000, float* [[SLOTF]]
-; CHECK-NEXT:    store float 0x3FDE8927A0000000, float* [[SLOTF]]
-; CHECK-NEXT:    store float 8.100000e+01, float* [[SLOTF]]
-; CHECK-NEXT:    store float 0x40240926E0000000, float* [[SLOTF]]
+; CHECK-NEXT:    store double 0.000000e+00, ptr [[SLOT]]
+; CHECK-NEXT:    store double 0x3FF921FB54442D18, ptr [[SLOT]]
+; CHECK-NEXT:    store double 0x3FE4978FA3269EE1, ptr [[SLOT]]
+; CHECK-NEXT:    store double 0x402422A497D6185E, ptr [[SLOT]]
+; CHECK-NEXT:    store double 0x403415E5BF6FB106, ptr [[SLOT]]
+; CHECK-NEXT:    store double 8.000000e+00, ptr [[SLOT]]
+; CHECK-NEXT:    store double 0x3FF193EA7AAD030{{[AB]}}, ptr [[SLOT]]
+; CHECK-NEXT:    store double 0x3FDE8927964FD5FD, ptr [[SLOT]]
+; CHECK-NEXT:    store double 1.000000e+00, ptr [[SLOT]]
+; CHECK-NEXT:    store double 0x40240926E70949AE, ptr [[SLOT]]
+; CHECK-NEXT:    store float 0.000000e+00, ptr [[SLOTF]]
+; CHECK-NEXT:    store float 0x3FF921FB60000000, ptr [[SLOTF]]
+; CHECK-NEXT:    store float 0x3FE4978FA0000000, ptr [[SLOTF]]
+; CHECK-NEXT:    store float 0x402422A4A0000000, ptr [[SLOTF]]
+; CHECK-NEXT:    store float 0x403415E5C0000000, ptr [[SLOTF]]
+; CHECK-NEXT:    store float 8.000000e+00, ptr [[SLOTF]]
+; CHECK-NEXT:    store float 0x3FF193EA80000000, ptr [[SLOTF]]
+; CHECK-NEXT:    store float 0x3FDE8927A0000000, ptr [[SLOTF]]
+; CHECK-NEXT:    store float 8.100000e+01, ptr [[SLOTF]]
+; CHECK-NEXT:    store float 0x40240926E0000000, ptr [[SLOTF]]
 ; CHECK-NEXT:    ret void
 ;
 ; MUSL-LABEL: @T(
@@ -104,46 +104,46 @@ define void @T() {
   %slotf = alloca float
 
   %ACOS = call fast double @__acos_finite(double 1.000000e+00)
-  store double %ACOS, double* %slot
+  store double %ACOS, ptr %slot
   %ASIN = call fast double @__asin_finite(double 1.000000e+00)
-  store double %ASIN, double* %slot
+  store double %ASIN, ptr %slot
   %ATAN2 = call fast double @__atan2_finite(double 3.000000e+00, double 4.000000e+00)
-  store double %ATAN2, double* %slot
+  store double %ATAN2, ptr %slot
   %COSH = call fast double @__cosh_finite(double 3.000000e+00)
-  store double %COSH, double* %slot
+  store double %COSH, ptr %slot
   %EXP = call fast double @__exp_finite(double 3.000000e+00)
-  store double %EXP, double* %slot
+  store double %EXP, ptr %slot
   %EXP2 = call fast double @__exp2_finite(double 3.000000e+00)
-  store double %EXP2, double* %slot
+  store double %EXP2, ptr %slot
   %LOG = call fast double @__log_finite(double 3.000000e+00)
-  store double %LOG, double* %slot
+  store double %LOG, ptr %slot
   %LOG10 = call fast double @__log10_finite(double 3.000000e+00)
-  store double %LOG10, double* %slot
+  store double %LOG10, ptr %slot
   %POW = call fast double @__pow_finite(double 1.000000e+00, double 4.000000e+00)
-  store double %POW, double* %slot
+  store double %POW, ptr %slot
   %SINH = call fast double @__sinh_finite(double 3.000000e+00)
-  store double %SINH, double* %slot
+  store double %SINH, ptr %slot
 
   %ACOSF = call fast float @__acosf_finite(float 1.000000e+00)
-  store float %ACOSF, float* %slotf
+  store float %ACOSF, ptr %slotf
   %ASINF = call fast float @__asinf_finite(float 1.000000e+00)
-  store float %ASINF, float* %slotf
+  store float %ASINF, ptr %slotf
   %ATAN2F = call fast float @__atan2f_finite(float 3.000000e+00, float 4.000000e+00)
-  store float %ATAN2F, float* %slotf
+  store float %ATAN2F, ptr %slotf
   %COSHF = call fast float @__coshf_finite(float 3.000000e+00)
-  store float %COSHF, float* %slotf
+  store float %COSHF, ptr %slotf
   %EXPF = call fast float @__expf_finite(float 3.000000e+00)
-  store float %EXPF, float* %slotf
+  store float %EXPF, ptr %slotf
   %EXP2F = call fast float @__exp2f_finite(float 3.000000e+00)
-  store float %EXP2F, float* %slotf
+  store float %EXP2F, ptr %slotf
   %LOGF = call fast float @__logf_finite(float 3.000000e+00)
-  store float %LOGF, float* %slotf
+  store float %LOGF, ptr %slotf
   %LOG10F = call fast float @__log10f_finite(float 3.000000e+00)
-  store float %LOG10F, float* %slotf
+  store float %LOG10F, ptr %slotf
   %POWF = call fast float @__powf_finite(float 3.000000e+00, float 4.000000e+00)
-  store float %POWF, float* %slotf
+  store float %POWF, ptr %slotf
   %SINHF = call fast float @__sinhf_finite(float 3.000000e+00)
-  store float %SINHF, float* %slotf
+  store float %SINHF, ptr %slotf
   ret void
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/calls.ll b/llvm/test/Transforms/InstSimplify/ConstProp/calls.ll
index 36b9280ff308c..61a30c781c0f4 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/calls.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/calls.ll
@@ -63,118 +63,118 @@ define double @T() {
   %slotf = alloca float
 ; FNOBUILTIN: call
   %1 = call double @acos(double 1.000000e+00)
-  store double %1, double* %slot
+  store double %1, ptr %slot
 ; FNOBUILTIN: call
   %2 = call double @asin(double 1.000000e+00)
-  store double %2, double* %slot
+  store double %2, ptr %slot
 ; FNOBUILTIN: call
   %3 = call double @atan(double 3.000000e+00)
-  store double %3, double* %slot
+  store double %3, ptr %slot
 ; FNOBUILTIN: call
   %4 = call double @atan2(double 3.000000e+00, double 4.000000e+00)
-  store double %4, double* %slot
+  store double %4, ptr %slot
 ; FNOBUILTIN: call
   %5 = call double @ceil(double 3.000000e+00)
-  store double %5, double* %slot
+  store double %5, ptr %slot
 ; FNOBUILTIN: call
   %6 = call double @cosh(double 3.000000e+00)
-  store double %6, double* %slot
+  store double %6, ptr %slot
 ; FNOBUILTIN: call
   %7 = call double @exp(double 3.000000e+00)
-  store double %7, double* %slot
+  store double %7, ptr %slot
 ; FNOBUILTIN: call
   %8 = call double @exp2(double 3.000000e+00)
-  store double %8, double* %slot
+  store double %8, ptr %slot
 ; FNOBUILTIN: call
   %9 = call double @fabs(double 3.000000e+00)
-  store double %9, double* %slot
+  store double %9, ptr %slot
 ; FNOBUILTIN: call
   %10 = call double @floor(double 3.000000e+00)
-  store double %10, double* %slot
+  store double %10, ptr %slot
 ; FNOBUILTIN: call
   %11 = call double @fmod(double 3.000000e+00, double 4.000000e+00)
-  store double %11, double* %slot
+  store double %11, ptr %slot
 ; FNOBUILTIN: call
   %12 = call double @log(double 3.000000e+00)
-  store double %12, double* %slot
+  store double %12, ptr %slot
 ; FNOBUILTIN: call
   %13 = call double @log10(double 3.000000e+00)
-  store double %13, double* %slot
+  store double %13, ptr %slot
 ; FNOBUILTIN: call
   %14 = call double @pow(double 3.000000e+00, double 4.000000e+00)
-  store double %14, double* %slot
+  store double %14, ptr %slot
 ; FNOBUILTIN: call
   %round_val = call double @round(double 3.000000e+00)
-  store double %round_val, double* %slot
+  store double %round_val, ptr %slot
 ; FNOBUILTIN: call
   %15 = call double @sinh(double 3.000000e+00)
-  store double %15, double* %slot
+  store double %15, ptr %slot
 ; FNOBUILTIN: call
   %16 = call double @tanh(double 3.000000e+00)
-  store double %16, double* %slot
+  store double %16, ptr %slot
 ; FNOBUILTIN: call
   %17 = call float @acosf(float 1.000000e+00)
-  store float %17, float* %slotf
+  store float %17, ptr %slotf
 ; FNOBUILTIN: call
   %18 = call float @asinf(float 1.000000e+00)
-  store float %18, float* %slotf
+  store float %18, ptr %slotf
 ; FNOBUILTIN: call
   %19 = call float @atanf(float 3.000000e+00)
-  store float %19, float* %slotf
+  store float %19, ptr %slotf
 ; FNOBUILTIN: call
   %20 = call float @atan2f(float 3.000000e+00, float 4.000000e+00)
-  store float %20, float* %slotf
+  store float %20, ptr %slotf
 ; FNOBUILTIN: call
   %21 = call float @ceilf(float 3.000000e+00)
-  store float %21, float* %slotf
+  store float %21, ptr %slotf
 ; FNOBUILTIN: call
   %22 = call float @cosf(float 3.000000e+00)
-  store float %22, float* %slotf
+  store float %22, ptr %slotf
 ; FNOBUILTIN: call
   %23 = call float @coshf(float 3.000000e+00)
-  store float %23, float* %slotf
+  store float %23, ptr %slotf
 ; FNOBUILTIN: call
   %24 = call float @expf(float 3.000000e+00)
-  store float %24, float* %slotf
+  store float %24, ptr %slotf
 ; FNOBUILTIN: call
   %25 = call float @exp2f(float 3.000000e+00)
-  store float %25, float* %slotf
+  store float %25, ptr %slotf
 ; FNOBUILTIN: call
   %26 = call float @fabsf(float 3.000000e+00)
-  store float %26, float* %slotf
+  store float %26, ptr %slotf
 ; FNOBUILTIN: call
   %27 = call float @floorf(float 3.000000e+00)
-  store float %27, float* %slotf
+  store float %27, ptr %slotf
 ; FNOBUILTIN: call
   %28 = call float @fmodf(float 3.000000e+00, float 4.000000e+00)
-  store float %28, float* %slotf
+  store float %28, ptr %slotf
 ; FNOBUILTIN: call
   %29 = call float @logf(float 3.000000e+00)
-  store float %29, float* %slotf
+  store float %29, ptr %slotf
 ; FNOBUILTIN: call
   %30 = call float @log10f(float 3.000000e+00)
-  store float %30, float* %slotf
+  store float %30, ptr %slotf
 ; FNOBUILTIN: call
   %31 = call float @powf(float 3.000000e+00, float 4.000000e+00)
-  store float %31, float* %slotf
+  store float %31, ptr %slotf
 ; FNOBUILTIN: call
   %roundf_val = call float @roundf(float 3.000000e+00)
-  store float %roundf_val, float* %slotf
+  store float %roundf_val, ptr %slotf
 ; FNOBUILTIN: call
   %32 = call float @sinf(float 3.000000e+00)
-  store float %32, float* %slotf
+  store float %32, ptr %slotf
 ; FNOBUILTIN: call
   %33 = call float @sinhf(float 3.000000e+00)
-  store float %33, float* %slotf
+  store float %33, ptr %slotf
 ; FNOBUILTIN: call
   %34 = call float @sqrtf(float 3.000000e+00)
-  store float %34, float* %slotf
+  store float %34, ptr %slotf
 ; FNOBUILTIN: call
   %35 = call float @tanf(float 3.000000e+00)
-  store float %35, float* %slotf
+  store float %35, ptr %slotf
 ; FNOBUILTIN: call
   %36 = call float @tanhf(float 3.000000e+00)
-  store float %36, float* %slotf
+  store float %36, ptr %slotf
 
 ; FNOBUILTIN: ret
 

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/cast-vector.ll b/llvm/test/Transforms/InstSimplify/ConstProp/cast-vector.ll
index 9e468d9c232e0..685dfcc3cf2a5 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/cast-vector.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/cast-vector.ll
@@ -8,11 +8,11 @@
 define <2 x i16> @test1() {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    ret <2 x i16> ptrtoint (<2 x i32*> getelementptr ([10 x i32], [10 x i32]* null, <2 x i64> zeroinitializer, <2 x i64> <i64 5, i64 7>) to <2 x i16>)
+; CHECK-NEXT:    ret <2 x i16> ptrtoint (<2 x ptr> getelementptr ([10 x i32], ptr null, <2 x i64> zeroinitializer, <2 x i64> <i64 5, i64 7>) to <2 x i16>)
 ;
 entry:
-  %gep = getelementptr inbounds [10 x i32], [10 x i32]* null, i16 0, <2 x i16> <i16 5, i16 7>
-  %vec = ptrtoint <2 x i32*> %gep to <2 x i16>
+  %gep = getelementptr inbounds [10 x i32], ptr null, i16 0, <2 x i16> <i16 5, i16 7>
+  %vec = ptrtoint <2 x ptr> %gep to <2 x i16>
   ret <2 x i16> %vec
 }
 
@@ -23,10 +23,10 @@ entry:
 define <2 x i16> @test2() {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    ret <2 x i16> ptrtoint (<2 x i32*> getelementptr (i32, i32* null, <2 x i64> <i64 5, i64 7>) to <2 x i16>)
+; CHECK-NEXT:    ret <2 x i16> ptrtoint (<2 x ptr> getelementptr (i32, ptr null, <2 x i64> <i64 5, i64 7>) to <2 x i16>)
 ;
 entry:
-  %gep = getelementptr i32, i32* null, <2 x i16> <i16 5, i16 7>
-  %vec = ptrtoint <2 x i32*> %gep to <2 x i16>
+  %gep = getelementptr i32, ptr null, <2 x i16> <i16 5, i16 7>
+  %vec = ptrtoint <2 x ptr> %gep to <2 x i16>
   ret <2 x i16> %vec
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/constant-expr.ll b/llvm/test/Transforms/InstSimplify/ConstProp/constant-expr.ll
index 1088fa6959ad9..eac4971f8a8b8 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/constant-expr.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/constant-expr.ll
@@ -4,60 +4,60 @@
 @Y = external global i8
 @Z = external global i8
 
- at A = global i1 add (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z))
-; CHECK: @A = global i1 xor (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z))
- at B = global i1 sub (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z)), align 2
-; CHECK: @B = global i1 xor (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z))
- at C = global i1 mul (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z))
-; CHECK: @C = global i1 and (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z))
-
- at D = global i1 sdiv (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z))
-; CHECK: @D = global i1 icmp ult (i8* @X, i8* @Y)
- at E = global i1 udiv (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z))
-; CHECK: @E = global i1 icmp ult (i8* @X, i8* @Y)
- at F = global i1 srem (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z))
+ at A = global i1 add (i1 icmp ult (ptr @X, ptr @Y), i1 icmp ult (ptr @X, ptr @Z))
+; CHECK: @A = global i1 xor (i1 icmp ult (ptr @X, ptr @Y), i1 icmp ult (ptr @X, ptr @Z))
+ at B = global i1 sub (i1 icmp ult (ptr @X, ptr @Y), i1 icmp ult (ptr @X, ptr @Z)), align 2
+; CHECK: @B = global i1 xor (i1 icmp ult (ptr @X, ptr @Y), i1 icmp ult (ptr @X, ptr @Z))
+ at C = global i1 mul (i1 icmp ult (ptr @X, ptr @Y), i1 icmp ult (ptr @X, ptr @Z))
+; CHECK: @C = global i1 and (i1 icmp ult (ptr @X, ptr @Y), i1 icmp ult (ptr @X, ptr @Z))
+
+ at D = global i1 sdiv (i1 icmp ult (ptr @X, ptr @Y), i1 icmp ult (ptr @X, ptr @Z))
+; CHECK: @D = global i1 icmp ult (ptr @X, ptr @Y)
+ at E = global i1 udiv (i1 icmp ult (ptr @X, ptr @Y), i1 icmp ult (ptr @X, ptr @Z))
+; CHECK: @E = global i1 icmp ult (ptr @X, ptr @Y)
+ at F = global i1 srem (i1 icmp ult (ptr @X, ptr @Y), i1 icmp ult (ptr @X, ptr @Z))
 ; CHECK: @F = global i1 false 
- at G = global i1 urem (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z))
+ at G = global i1 urem (i1 icmp ult (ptr @X, ptr @Y), i1 icmp ult (ptr @X, ptr @Z))
 ; CHECK: @G = global i1 false 
 
- at H = global i1 icmp ule (i32* bitcast (i8* @X to i32*), i32* bitcast (i8* @Y to i32*))
-; CHECK: @H = global i1 icmp ule (i8* @X, i8* @Y)
+ at H = global i1 icmp ule (ptr @X, ptr @Y)
+; CHECK: @H = global i1 icmp ule (ptr @X, ptr @Y)
 
- at I = global i1 xor (i1 icmp ult (i8* @X, i8* @Y), i1 false)
-; CHECK: @I = global i1 icmp ult (i8* @X, i8* @Y)
- at J = global i1 xor (i1 icmp ult (i8* @X, i8* @Y), i1 true)
-; CHECK: @J = global i1 icmp uge (i8* @X, i8* @Y)
+ at I = global i1 xor (i1 icmp ult (ptr @X, ptr @Y), i1 false)
+; CHECK: @I = global i1 icmp ult (ptr @X, ptr @Y)
+ at J = global i1 xor (i1 icmp ult (ptr @X, ptr @Y), i1 true)
+; CHECK: @J = global i1 icmp uge (ptr @X, ptr @Y)
 
- at K = global i1 icmp eq (i1 icmp ult (i8* @X, i8* @Y), i1 false)
-; CHECK: @K = global i1 icmp uge (i8* @X, i8* @Y)
- at L = global i1 icmp eq (i1 icmp ult (i8* @X, i8* @Y), i1 true)
-; CHECK: @L = global i1 icmp ult (i8* @X, i8* @Y)
- at M = global i1 icmp ne (i1 icmp ult (i8* @X, i8* @Y), i1 true)
-; CHECK: @M = global i1 icmp uge (i8* @X, i8* @Y)
- at N = global i1 icmp ne (i1 icmp ult (i8* @X, i8* @Y), i1 false)
-; CHECK: @N = global i1 icmp ult (i8* @X, i8* @Y)
+ at K = global i1 icmp eq (i1 icmp ult (ptr @X, ptr @Y), i1 false)
+; CHECK: @K = global i1 icmp uge (ptr @X, ptr @Y)
+ at L = global i1 icmp eq (i1 icmp ult (ptr @X, ptr @Y), i1 true)
+; CHECK: @L = global i1 icmp ult (ptr @X, ptr @Y)
+ at M = global i1 icmp ne (i1 icmp ult (ptr @X, ptr @Y), i1 true)
+; CHECK: @M = global i1 icmp uge (ptr @X, ptr @Y)
+ at N = global i1 icmp ne (i1 icmp ult (ptr @X, ptr @Y), i1 false)
+; CHECK: @N = global i1 icmp ult (ptr @X, ptr @Y)
 
- at O = global i1 icmp eq (i32 zext (i1 icmp ult (i8* @X, i8* @Y) to i32), i32 0)
-; CHECK: @O = global i1 icmp uge (i8* @X, i8* @Y)
+ at O = global i1 icmp eq (i32 zext (i1 icmp ult (ptr @X, ptr @Y) to i32), i32 0)
+; CHECK: @O = global i1 icmp uge (ptr @X, ptr @Y)
 
 
 
 ; PR5176
 
 ; CHECK: @T1 = global i1 true
- at T1 = global i1 icmp eq (i64 and (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 192)), i256 64) to i64), i64 1), i64 0)
+ at T1 = global i1 icmp eq (i64 and (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (ptr @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (ptr @A to i64) to i256), i256 192)), i256 64) to i64), i64 1), i64 0)
 
-; CHECK: @T2 = global i1* @B
- at T2 = global i1* inttoptr (i64 add (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 192)), i256 192) to i64), i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 192)), i256 128) to i64)) to i1*)
+; CHECK: @T2 = global ptr @B
+ at T2 = global ptr inttoptr (i64 add (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (ptr @A to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (ptr @B to i64) to i256), i256 192)), i256 192) to i64), i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (ptr @A to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (ptr @B to i64) to i256), i256 192)), i256 128) to i64)) to ptr)
 
-; CHECK: @T3 = global i64 add (i64 ptrtoint (i1* @B to i64), i64 -1)
- at T3 = global i64 add (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 192)), i256 64) to i64), i64 -1)
+; CHECK: @T3 = global i64 add (i64 ptrtoint (ptr @B to i64), i64 -1)
+ at T3 = global i64 add (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (ptr @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (ptr @A to i64) to i256), i256 192)), i256 64) to i64), i64 -1)
 
-; CHECK: @T4 = global i1* @B
- at T4 = global i1* inttoptr (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 192)), i256 64) to i64) to i1*)
+; CHECK: @T4 = global ptr @B
+ at T4 = global ptr inttoptr (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (ptr @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (ptr @A to i64) to i256), i256 192)), i256 64) to i64) to ptr)
 
-; CHECK: @T5 = global i1* @A
- at T5 = global i1* inttoptr (i64 add (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 192)), i256 192) to i64), i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (i1* @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (i1* @A to i64) to i256), i256 192)), i256 128) to i64)) to i1*)
+; CHECK: @T5 = global ptr @A
+ at T5 = global ptr inttoptr (i64 add (i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (ptr @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (ptr @A to i64) to i256), i256 192)), i256 192) to i64), i64 trunc (i256 lshr (i256 or (i256 and (i256 and (i256 shl (i256 zext (i64 ptrtoint (ptr @B to i64) to i256), i256 64), i256 -6277101735386680763495507056286727952638980837032266301441), i256 6277101735386680763835789423207666416102355444464034512895), i256 shl (i256 zext (i64 ptrtoint (ptr @A to i64) to i256), i256 192)), i256 128) to i64)) to ptr)
 
 
 
@@ -102,10 +102,10 @@
 ; CHECK: pr9011_15 = constant i128 0
 
 @select = internal constant
-          i32 select (i1 icmp ult (i32 ptrtoint (i8* @X to i32),
-                                   i32 ptrtoint (i8* @Y to i32)),
-            i32 select (i1 icmp ult (i32 ptrtoint (i8* @X to i32),
-                                     i32 ptrtoint (i8* @Y to i32)),
+          i32 select (i1 icmp ult (i32 ptrtoint (ptr @X to i32),
+                                   i32 ptrtoint (ptr @Y to i32)),
+            i32 select (i1 icmp ult (i32 ptrtoint (ptr @X to i32),
+                                     i32 ptrtoint (ptr @Y to i32)),
                i32 10, i32 20),
             i32 30)
 ; CHECK: select = internal constant i32 select {{.*}} i32 10, i32 30

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/div-zero.ll b/llvm/test/Transforms/InstSimplify/ConstProp/div-zero.ll
index f4049a9615a8a..22b4d8e35f2eb 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/div-zero.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/div-zero.ll
@@ -5,7 +5,7 @@ declare void @ext()
 define i32 @foo(i32 %ptr) {
 entry:
         %zero = sub i32 %ptr, %ptr              ; <i32> [#uses=1]
-        %div_zero = sdiv i32 %zero, ptrtoint (i32* getelementptr (i32, i32* null,
+        %div_zero = sdiv i32 %zero, ptrtoint (ptr getelementptr (i32, ptr null,
 i32 1) to i32)             ; <i32> [#uses=1]
         ret i32 %div_zero
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/float-to-ptr-cast.ll b/llvm/test/Transforms/InstSimplify/ConstProp/float-to-ptr-cast.ll
index a0c82b21ccb4a..7577e55fd357a 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/float-to-ptr-cast.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/float-to-ptr-cast.ll
@@ -1,15 +1,15 @@
 ; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
 
-define i32* @test1() {
-        %X = inttoptr i64 0 to i32*             ; <i32*> [#uses=1]
-        ret i32* %X
+define ptr @test1() {
+        %X = inttoptr i64 0 to ptr             ; <ptr> [#uses=1]
+        ret ptr %X
 }
 
-; CHECK:  ret i32* null
+; CHECK:  ret ptr null
 
-define i32* @test2() {
-        ret i32* null
+define ptr @test2() {
+        ret ptr null
 }
 
-; CHECK:  ret i32* null
+; CHECK:  ret ptr null
 

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/freeze.ll b/llvm/test/Transforms/InstSimplify/ConstProp/freeze.ll
index 472fcd9138588..2e27744d0a199 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/freeze.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/freeze.ll
@@ -6,10 +6,10 @@
 
 define i64 @ptr
diff 1() {
 ; CHECK-LABEL: @ptr
diff 1(
-; CHECK-NEXT:    ret i64 sub (i64 ptrtoint (i16* @g to i64), i64 ptrtoint (i16* @g2 to i64))
+; CHECK-NEXT:    ret i64 sub (i64 ptrtoint (ptr @g to i64), i64 ptrtoint (ptr @g2 to i64))
 ;
-  %i = ptrtoint i16* @g to i64
-  %i2 = ptrtoint i16* @g2 to i64
+  %i = ptrtoint ptr @g to i64
+  %i2 = ptrtoint ptr @g2 to i64
   %
diff  = sub i64 %i, %i2
   %r = freeze i64 %
diff 
   ret i64 %r
@@ -19,9 +19,9 @@ define i64 @ptr
diff 2() {
 ; CHECK-LABEL: @ptr
diff 2(
 ; CHECK-NEXT:    ret i64 -2
 ;
-  %i = ptrtoint i16* @g to i64
-  %gep = getelementptr i16, i16* @g, i64 1
-  %i2 = ptrtoint i16* %gep to i64
+  %i = ptrtoint ptr @g to i64
+  %gep = getelementptr i16, ptr @g, i64 1
+  %i2 = ptrtoint ptr %gep to i64
   %
diff  = sub i64 %i, %i2
   %r = freeze i64 %
diff 
   ret i64 %r

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/gep-alias-gep-load.ll b/llvm/test/Transforms/InstSimplify/ConstProp/gep-alias-gep-load.ll
index 703f242739e6d..944c710c20966 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/gep-alias-gep-load.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/gep-alias-gep-load.ll
@@ -1,35 +1,35 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -passes=instsimplify -S < %s | FileCheck %s
 
- at a1 = internal alias i32, getelementptr ({[3 x i32]}, {[3 x i32]}* @b, i32 0, i32 0, i32 1)
- at a2 = weak alias i32, getelementptr ({[3 x i32]}, {[3 x i32]}* @b, i32 0, i32 0, i32 1)
+ at a1 = internal alias i32, getelementptr ({[3 x i32]}, ptr @b, i32 0, i32 0, i32 1)
+ at a2 = weak alias i32, getelementptr ({[3 x i32]}, ptr @b, i32 0, i32 0, i32 1)
 @b = internal constant {[3 x i32]} {[3 x i32] [i32 2, i32 3, i32 4]}
 
- at c = internal alias i32, getelementptr ({[3 x i32]}, {[3 x i32]}* @d, i32 0, i32 0, i32 1)
+ at c = internal alias i32, getelementptr ({[3 x i32]}, ptr @d, i32 0, i32 0, i32 1)
 @d = weak constant {[3 x i32]} {[3 x i32] [i32 2, i32 3, i32 4]}
 
 define i32 @f() {
 ; CHECK-LABEL: @f(
 ; CHECK-NEXT:    ret i32 4
 ;
-  %a = load i32, i32* getelementptr (i32, i32* @a1, i64 1)
+  %a = load i32, ptr getelementptr (i32, ptr @a1, i64 1)
   ret i32 %a
 }
 
 define i32 @g() {
 ; CHECK-LABEL: @g(
-; CHECK-NEXT:    [[A:%.*]] = load i32, i32* getelementptr (i32, i32* @a2, i64 1), align 4
+; CHECK-NEXT:    [[A:%.*]] = load i32, ptr getelementptr (i32, ptr @a2, i64 1), align 4
 ; CHECK-NEXT:    ret i32 [[A]]
 ;
-  %a = load i32, i32* getelementptr (i32, i32* @a2, i64 1)
+  %a = load i32, ptr getelementptr (i32, ptr @a2, i64 1)
   ret i32 %a
 }
 
 define i32 @h() {
 ; CHECK-LABEL: @h(
-; CHECK-NEXT:    [[A:%.*]] = load i32, i32* getelementptr (i32, i32* @c, i64 1), align 4
+; CHECK-NEXT:    [[A:%.*]] = load i32, ptr getelementptr (i32, ptr @c, i64 1), align 4
 ; CHECK-NEXT:    ret i32 [[A]]
 ;
-  %a = load i32, i32* getelementptr (i32, i32* @c, i64 1)
+  %a = load i32, ptr getelementptr (i32, ptr @c, i64 1)
   ret i32 %a
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/gep-alias.ll b/llvm/test/Transforms/InstSimplify/ConstProp/gep-alias.ll
index 4b3fcf6679795..b987f8b9ac933 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/gep-alias.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/gep-alias.ll
@@ -8,10 +8,10 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
- at a = internal global [3 x i8*] zeroinitializer
- at b = linkonce_odr alias [3 x i8*], [3 x i8*]* @a
+ at a = internal global [3 x ptr] zeroinitializer
+ at b = linkonce_odr alias [3 x ptr], ptr @a
 
-define i8** @f() {
-  ; CHECK: ret i8** getelementptr ([3 x i8*], [3 x i8*]* @b, i64 0, i64 1)
-  ret i8** getelementptr ([3 x i8*], [3 x i8*]* @b, i64 0, i64 1)
+define ptr @f() {
+  ; CHECK: ret ptr getelementptr ([3 x ptr], ptr @b, i64 0, i64 1)
+  ret ptr getelementptr ([3 x ptr], ptr @b, i64 0, i64 1)
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/gep-constanfolding-error.ll b/llvm/test/Transforms/InstSimplify/ConstProp/gep-constanfolding-error.ll
index a9ba2caa057ab..ba0eb4bce25ad 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/gep-constanfolding-error.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/gep-constanfolding-error.ll
@@ -10,7 +10,7 @@ target triple = "armv7-none-eabi"
 @t6 = local_unnamed_addr global i32 1, align 4
 @j = local_unnamed_addr global [6 x [6 x [7 x i8]]] [[6 x [7 x i8]] [[7 x i8] c"\06\00\00\00\00\00\00", [7 x i8] zeroinitializer, [7 x i8] zeroinitializer, [7 x i8] zeroinitializer, [7 x i8] zeroinitializer, [7 x i8] zeroinitializer], [6 x [7 x i8]] zeroinitializer, [6 x [7 x i8]] zeroinitializer, [6 x [7 x i8]] zeroinitializer, [6 x [7 x i8]] zeroinitializer, [6 x [7 x i8]] zeroinitializer], align 1
 @p = internal global i64 0, align 8
- at y = local_unnamed_addr global i64* @p, align 4
+ at y = local_unnamed_addr global ptr @p, align 4
 @b = internal unnamed_addr global i32 0, align 4
 @h = common local_unnamed_addr global i16 0, align 2
 @a = common local_unnamed_addr global i32 0, align 4
@@ -20,33 +20,33 @@ target triple = "armv7-none-eabi"
 ; Function Attrs: nounwind
 define i32 @main() local_unnamed_addr {
 entry:
-  %0 = load i32, i32* @t6, align 4
+  %0 = load i32, ptr @t6, align 4
   %inc = add nsw i32 %0, 1
-  store i32 %inc, i32* @t6, align 4
-  store i16 4, i16* @h, align 2
-  %1 = load i32, i32* @a, align 4
+  store i32 %inc, ptr @t6, align 4
+  store i16 4, ptr @h, align 2
+  %1 = load i32, ptr @a, align 4
   %conv = trunc i32 %1 to i8
-  store i32 1, i32* @f, align 4
-  %2 = load i64, i64* @p, align 8
+  store i32 1, ptr @f, align 4
+  %2 = load i64, ptr @p, align 8
   %cmp4 = icmp slt i64 %2, 2
   %conv6 = zext i1 %cmp4 to i8
-  %3 = load i16, i16* @h, align 2
+  %3 = load i16, ptr @h, align 2
   %conv7 = sext i16 %3 to i32
   %add = add nsw i32 %conv7, 1
-  %f.promoted = load i32, i32* @f, align 4
+  %f.promoted = load i32, ptr @f, align 4
   %4 = mul i32 %conv7, 7
   %5 = add i32 %4, 5
   %6 = sub i32 -1, %f.promoted
   %7 = icmp sgt i32 %6, -2
   %smax = select i1 %7, i32 %6, i32 -2
   %8 = sub i32 6, %smax
-  %scevgep = getelementptr [6 x [6 x [7 x i8]]], [6 x [6 x [7 x i8]]]* @j, i32 0, i32 0, i32 %5, i32 %8
+  %scevgep = getelementptr [6 x [6 x [7 x i8]]], ptr @j, i32 0, i32 0, i32 %5, i32 %8
   %9 = add i32 %f.promoted, %smax
   %10 = add i32 %9, 2
-  call void @llvm.memset.p0i8.i32(i8* %scevgep, i8 %conv6, i32 %10, i1 false)
-; CHECK:  call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([6 x [6 x [7 x i8]]], [6 x [6 x [7 x i8]]]* @j, i32 0, i{{32|64}} 5, i{{32|64}} 4, i32 1), i8 %conv6, i32 1, i1 false)
-; CHECK-NOT: call void @llvm.memset.p0i8.i32(i8* getelementptr ([6 x [6 x [7 x i8]]], [6 x [6 x [7 x i8]]]* @j, i64 1, i64 4, i64 4, i32 1)
+  call void @llvm.memset.p0.i32(ptr %scevgep, i8 %conv6, i32 %10, i1 false)
+; CHECK:  call void @llvm.memset.p0.i32(ptr getelementptr inbounds ([6 x [6 x [7 x i8]]], ptr @j, i32 0, i{{32|64}} 5, i{{32|64}} 4, i32 1), i8 %conv6, i32 1, i1 false)
+; CHECK-NOT: call void @llvm.memset.p0.i32(ptr getelementptr ([6 x [6 x [7 x i8]]], ptr @j, i64 1, i64 4, i64 4, i32 1)
   ret i32 0
 }
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1)

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/gep-zeroinit-vector.ll b/llvm/test/Transforms/InstSimplify/ConstProp/gep-zeroinit-vector.ll
index bbb2b56b904fc..bce07b0775620 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/gep-zeroinit-vector.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/gep-zeroinit-vector.ll
@@ -7,13 +7,13 @@
 %rec8 = type { i16 }
 @a = global [1 x %rec8] zeroinitializer
 
-define <2 x i16*> @test_gep() {
+define <2 x ptr> @test_gep() {
 ; CHECK-LABEL: @test_gep(
-; CHECK-NEXT:    ret <2 x i16*> <i16* getelementptr inbounds ([1 x %rec8], [1 x %rec8]* @a, i32 0, i32 0, i32 0), i16* getelementptr inbounds ([1 x %rec8], [1 x %rec8]* @a, i32 0, i32 0, i32 0)>
+; CHECK-NEXT:    ret <2 x ptr> <ptr @a, ptr @a>
 ;
-  %A = getelementptr [1 x %rec8], [1 x %rec8]* @a, <2 x i16> zeroinitializer, <2 x i64> zeroinitializer
-  %B = bitcast <2 x %rec8*> %A to <2 x i16*>
-  ret <2 x i16*> %B
+  %A = getelementptr [1 x %rec8], ptr @a, <2 x i16> zeroinitializer, <2 x i64> zeroinitializer
+  %B = bitcast <2 x ptr> %A to <2 x ptr>
+  ret <2 x ptr> %B
 }
 
 ; Testcase that verify the cast-of-cast when the outer/second cast is to a

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll b/llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll
index 67f007982a624..f55a18c3b3052 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll
@@ -1,65 +1,65 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -instsimplify -S -verify | FileCheck %s
 
-define i1 @ule_null_constexpr(i8* %x) {
+define i1 @ule_null_constexpr(ptr %x) {
 ; CHECK-LABEL: @ule_null_constexpr(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %cmp = icmp ule i8 (...)* null, bitcast (i1 (i8*)* @ule_null_constexpr to i8 (...)*)
+  %cmp = icmp ule ptr null, @ule_null_constexpr
   ret i1 %cmp
 }
 
-define i1 @ugt_null_constexpr(i8* %x) {
+define i1 @ugt_null_constexpr(ptr %x) {
 ; CHECK-LABEL: @ugt_null_constexpr(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = icmp ugt i8 (...)* null, bitcast (i1 (i8*)* @ugt_null_constexpr to i8 (...)*)
+  %cmp = icmp ugt ptr null, @ugt_null_constexpr
   ret i1 %cmp
 }
 
-define i1 @uge_constexpr_null(i8* %x) {
+define i1 @uge_constexpr_null(ptr %x) {
 ; CHECK-LABEL: @uge_constexpr_null(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %cmp = icmp uge i8 (...)* bitcast (i1 (i8*)* @ugt_null_constexpr to i8 (...)*), null
+  %cmp = icmp uge ptr @ugt_null_constexpr, null
   ret i1 %cmp
 }
 
-define i1 @ult_constexpr_null(i8* %x) {
+define i1 @ult_constexpr_null(ptr %x) {
 ; CHECK-LABEL: @ult_constexpr_null(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = icmp ult i8 (...)* bitcast (i1 (i8*)* @ugt_null_constexpr to i8 (...)*), null
+  %cmp = icmp ult ptr @ugt_null_constexpr, null
   ret i1 %cmp
 }
 
 ; Negative test - we don't know if the constexpr is null.
 
-define i1 @ule_constexpr_null(i8* %x) {
+define i1 @ule_constexpr_null(ptr %x) {
 ; CHECK-LABEL: @ule_constexpr_null(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = icmp ule i8 (...)* bitcast (i1 (i8*)* @ugt_null_constexpr to i8 (...)*), null
+  %cmp = icmp ule ptr @ugt_null_constexpr, null
   ret i1 %cmp
 }
 
 ; Negative test - we don't know if the constexpr is *signed* less-than null.
 
-define i1 @slt_constexpr_null(i8* %x) {
+define i1 @slt_constexpr_null(ptr %x) {
 ; CHECK-LABEL: @slt_constexpr_null(
-; CHECK-NEXT:    ret i1 icmp slt (i8 (...)* bitcast (i1 (i8*)* @ugt_null_constexpr to i8 (...)*), i8 (...)* null)
+; CHECK-NEXT:    ret i1 icmp slt (ptr @ugt_null_constexpr, ptr null)
 ;
-  %cmp = icmp slt i8 (...)* bitcast (i1 (i8*)* @ugt_null_constexpr to i8 (...)*), null
+  %cmp = icmp slt ptr @ugt_null_constexpr, null
   ret i1 %cmp
 }
 
 ; Negative test - we don't try to evaluate this comparison of constant expressions.
 
-define i1 @ult_constexpr_constexpr_one(i8* %x) {
+define i1 @ult_constexpr_constexpr_one(ptr %x) {
 ; CHECK-LABEL: @ult_constexpr_constexpr_one(
-; CHECK-NEXT:    ret i1 icmp ult (i8 (...)* bitcast (i1 (i8*)* @ugt_null_constexpr to i8 (...)*), i8 (...)* inttoptr (i32 1 to i8 (...)*))
+; CHECK-NEXT:    ret i1 icmp ugt (ptr inttoptr (i32 1 to ptr), ptr @ugt_null_constexpr)
 ;
-  %cmp = icmp ult i8 (...)* bitcast (i1 (i8*)* @ugt_null_constexpr to i8 (...)*), inttoptr (i32 1 to i8 (...)*)
+  %cmp = icmp ult ptr @ugt_null_constexpr, inttoptr (i32 1 to ptr)
   ret i1 %cmp
 }
 
@@ -72,7 +72,7 @@ define i1 @global_ne_null() {
 ; CHECK-LABEL: @global_ne_null(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %cmp = icmp ne [2 x i32]* @g, null
+  %cmp = icmp ne ptr @g, null
   ret i1 %cmp
 }
 
@@ -80,24 +80,24 @@ define i1 @global_ugt_null() {
 ; CHECK-LABEL: @global_ugt_null(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %cmp = icmp ugt [2 x i32]* @g, null
+  %cmp = icmp ugt ptr @g, null
   ret i1 %cmp
 }
 
 define i1 @global_sgt_null() {
 ; CHECK-LABEL: @global_sgt_null(
-; CHECK-NEXT:    ret i1 icmp sgt ([2 x i32]* @g, [2 x i32]* null)
+; CHECK-NEXT:    ret i1 icmp sgt (ptr @g, ptr null)
 ;
-  %cmp = icmp sgt [2 x i32]* @g, null
+  %cmp = icmp sgt ptr @g, null
   ret i1 %cmp
 }
 
 ; Should not fold to true, as the gep computes a null value.
 define i1 @global_out_of_bounds_gep_ne_null() {
 ; CHECK-LABEL: @global_out_of_bounds_gep_ne_null(
-; CHECK-NEXT:    ret i1 icmp ne (i8* getelementptr (i8, i8* @g3, i64 sub (i64 0, i64 ptrtoint (i8* @g3 to i64))), i8* null)
+; CHECK-NEXT:    ret i1 icmp ne (ptr getelementptr (i8, ptr @g3, i64 sub (i64 0, i64 ptrtoint (ptr @g3 to i64))), ptr null)
 ;
-  %cmp = icmp ne i8* getelementptr (i8, i8* @g3, i64 sub (i64 0, i64 ptrtoint (i8* @g3 to i64))), null
+  %cmp = icmp ne ptr getelementptr (i8, ptr @g3, i64 sub (i64 0, i64 ptrtoint (ptr @g3 to i64))), null
   ret i1 %cmp
 }
 
@@ -105,8 +105,8 @@ define i1 @global_inbounds_gep_ne_null() {
 ; CHECK-LABEL: @global_inbounds_gep_ne_null(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %gep = getelementptr inbounds [2 x i32], [2 x i32]* @g, i64 1
-  %cmp = icmp ne [2 x i32]* %gep, null
+  %gep = getelementptr inbounds [2 x i32], ptr @g, i64 1
+  %cmp = icmp ne ptr %gep, null
   ret i1 %cmp
 }
 
@@ -114,17 +114,17 @@ define i1 @global_gep_ugt_null() {
 ; CHECK-LABEL: @global_gep_ugt_null(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %gep = getelementptr inbounds [2 x i32], [2 x i32]* @g, i64 1
-  %cmp = icmp ugt [2 x i32]* %gep, null
+  %gep = getelementptr inbounds [2 x i32], ptr @g, i64 1
+  %cmp = icmp ugt ptr %gep, null
   ret i1 %cmp
 }
 
 define i1 @global_gep_sgt_null() {
 ; CHECK-LABEL: @global_gep_sgt_null(
-; CHECK-NEXT:    ret i1 icmp sgt ([2 x i32]* getelementptr inbounds ([2 x i32], [2 x i32]* @g, i64 1), [2 x i32]* null)
+; CHECK-NEXT:    ret i1 icmp sgt (ptr getelementptr inbounds ([2 x i32], ptr @g, i64 1), ptr null)
 ;
-  %gep = getelementptr inbounds [2 x i32], [2 x i32]* @g, i64 1
-  %cmp = icmp sgt [2 x i32]* %gep, null
+  %gep = getelementptr inbounds [2 x i32], ptr @g, i64 1
+  %cmp = icmp sgt ptr %gep, null
   ret i1 %cmp
 }
 
@@ -132,28 +132,28 @@ define i1 @global_gep_sgt_null() {
 ; are equal.
 define i1 @null_gep_ne_null() {
 ; CHECK-LABEL: @null_gep_ne_null(
-; CHECK-NEXT:    ret i1 icmp ne (i8* getelementptr (i8, i8* null, i64 ptrtoint (i32* @g2_weak to i64)), i8* null)
+; CHECK-NEXT:    ret i1 icmp ne (ptr getelementptr (i8, ptr null, i64 ptrtoint (ptr @g2_weak to i64)), ptr null)
 ;
-  %gep = getelementptr i8, i8* null, i64 ptrtoint (i32* @g2_weak to i64)
-  %cmp = icmp ne i8* %gep, null
+  %gep = getelementptr i8, ptr null, i64 ptrtoint (ptr @g2_weak to i64)
+  %cmp = icmp ne ptr %gep, null
   ret i1 %cmp
 }
 
 define i1 @null_gep_ugt_null() {
 ; CHECK-LABEL: @null_gep_ugt_null(
-; CHECK-NEXT:    ret i1 icmp ugt (i8* getelementptr (i8, i8* null, i64 ptrtoint (i32* @g2_weak to i64)), i8* null)
+; CHECK-NEXT:    ret i1 icmp ugt (ptr getelementptr (i8, ptr null, i64 ptrtoint (ptr @g2_weak to i64)), ptr null)
 ;
-  %gep = getelementptr i8, i8* null, i64 ptrtoint (i32* @g2_weak to i64)
-  %cmp = icmp ugt i8* %gep, null
+  %gep = getelementptr i8, ptr null, i64 ptrtoint (ptr @g2_weak to i64)
+  %cmp = icmp ugt ptr %gep, null
   ret i1 %cmp
 }
 
 define i1 @null_gep_sgt_null() {
 ; CHECK-LABEL: @null_gep_sgt_null(
-; CHECK-NEXT:    ret i1 icmp sgt (i8* getelementptr (i8, i8* null, i64 ptrtoint (i32* @g2_weak to i64)), i8* null)
+; CHECK-NEXT:    ret i1 icmp sgt (ptr getelementptr (i8, ptr null, i64 ptrtoint (ptr @g2_weak to i64)), ptr null)
 ;
-  %gep = getelementptr i8, i8* null, i64 ptrtoint (i32* @g2_weak to i64)
-  %cmp = icmp sgt i8* %gep, null
+  %gep = getelementptr i8, ptr null, i64 ptrtoint (ptr @g2_weak to i64)
+  %cmp = icmp sgt ptr %gep, null
   ret i1 %cmp
 }
 
@@ -161,8 +161,8 @@ define i1 @null_gep_ne_null_constant_int() {
 ; CHECK-LABEL: @null_gep_ne_null_constant_int(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %gep = getelementptr i8, i8* null, i64 1
-  %cmp = icmp ne i8* %gep, null
+  %gep = getelementptr i8, ptr null, i64 1
+  %cmp = icmp ne ptr %gep, null
   ret i1 %cmp
 }
 
@@ -170,35 +170,35 @@ define i1 @null_gep_ugt_null_constant_int() {
 ; CHECK-LABEL: @null_gep_ugt_null_constant_int(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %gep = getelementptr i8, i8* null, i64 1
-  %cmp = icmp ugt i8* %gep, null
+  %gep = getelementptr i8, ptr null, i64 1
+  %cmp = icmp ugt ptr %gep, null
   ret i1 %cmp
 }
 
 define i1 @null_gep_ne_global() {
 ; CHECK-LABEL: @null_gep_ne_global(
-; CHECK-NEXT:    ret i1 icmp ne (i8* getelementptr (i8, i8* null, i64 ptrtoint (i8* @g3 to i64)), i8* @g3)
+; CHECK-NEXT:    ret i1 icmp ne (ptr getelementptr (i8, ptr null, i64 ptrtoint (ptr @g3 to i64)), ptr @g3)
 ;
-  %gep = getelementptr i8, i8* null, i64 ptrtoint (i8* @g3 to i64)
-  %cmp = icmp ne i8* %gep, @g3
+  %gep = getelementptr i8, ptr null, i64 ptrtoint (ptr @g3 to i64)
+  %cmp = icmp ne ptr %gep, @g3
   ret i1 %cmp
 }
 
 define i1 @null_gep_ult_global() {
 ; CHECK-LABEL: @null_gep_ult_global(
-; CHECK-NEXT:    ret i1 icmp ult (i8* getelementptr (i8, i8* null, i64 ptrtoint (i8* @g3 to i64)), i8* @g3)
+; CHECK-NEXT:    ret i1 icmp ult (ptr getelementptr (i8, ptr null, i64 ptrtoint (ptr @g3 to i64)), ptr @g3)
 ;
-  %gep = getelementptr i8, i8* null, i64 ptrtoint (i8* @g3 to i64)
-  %cmp = icmp ult i8* %gep, @g3
+  %gep = getelementptr i8, ptr null, i64 ptrtoint (ptr @g3 to i64)
+  %cmp = icmp ult ptr %gep, @g3
   ret i1 %cmp
 }
 
 define i1 @null_gep_slt_global() {
 ; CHECK-LABEL: @null_gep_slt_global(
-; CHECK-NEXT:    ret i1 icmp slt ([2 x i32]* getelementptr ([2 x i32], [2 x i32]* null, i64 ptrtoint (i32* @g2 to i64)), [2 x i32]* @g)
+; CHECK-NEXT:    ret i1 icmp slt (ptr getelementptr ([2 x i32], ptr null, i64 ptrtoint (ptr @g2 to i64)), ptr @g)
 ;
-  %gep = getelementptr [2 x i32], [2 x i32]* null, i64 ptrtoint (i32* @g2 to i64)
-  %cmp = icmp slt [2 x i32]* %gep, @g
+  %gep = getelementptr [2 x i32], ptr null, i64 ptrtoint (ptr @g2 to i64)
+  %cmp = icmp slt ptr %gep, @g
   ret i1 %cmp
 }
 
@@ -206,8 +206,8 @@ define i1 @global_gep_ne_global() {
 ; CHECK-LABEL: @global_gep_ne_global(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %gep = getelementptr inbounds [2 x i32], [2 x i32]* @g, i64 1
-  %cmp = icmp ne [2 x i32]* %gep, @g
+  %gep = getelementptr inbounds [2 x i32], ptr @g, i64 1
+  %cmp = icmp ne ptr %gep, @g
   ret i1 %cmp
 }
 
@@ -215,36 +215,36 @@ define i1 @global_gep_ugt_global() {
 ; CHECK-LABEL: @global_gep_ugt_global(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %gep = getelementptr inbounds [2 x i32], [2 x i32]* @g, i64 1
-  %cmp = icmp ugt [2 x i32]* %gep, @g
+  %gep = getelementptr inbounds [2 x i32], ptr @g, i64 1
+  %cmp = icmp ugt ptr %gep, @g
   ret i1 %cmp
 }
 
 define i1 @global_gep_sgt_global() {
 ; CHECK-LABEL: @global_gep_sgt_global(
-; CHECK-NEXT:    ret i1 icmp sgt ([2 x i32]* getelementptr inbounds ([2 x i32], [2 x i32]* @g, i64 1), [2 x i32]* @g)
+; CHECK-NEXT:    ret i1 icmp sgt (ptr getelementptr inbounds ([2 x i32], ptr @g, i64 1), ptr @g)
 ;
-  %gep = getelementptr inbounds [2 x i32], [2 x i32]* @g, i64 1
-  %cmp = icmp sgt [2 x i32]* %gep, @g
+  %gep = getelementptr inbounds [2 x i32], ptr @g, i64 1
+  %cmp = icmp sgt ptr %gep, @g
   ret i1 %cmp
 }
 
 ; This should not fold to true, as the offset is negative.
 define i1 @global_gep_ugt_global_neg_offset() {
 ; CHECK-LABEL: @global_gep_ugt_global_neg_offset(
-; CHECK-NEXT:    ret i1 icmp ugt ([2 x i32]* getelementptr ([2 x i32], [2 x i32]* @g, i64 -1), [2 x i32]* @g)
+; CHECK-NEXT:    ret i1 icmp ugt (ptr getelementptr ([2 x i32], ptr @g, i64 -1), ptr @g)
 ;
-  %gep = getelementptr [2 x i32], [2 x i32]* @g, i64 -1
-  %cmp = icmp ugt [2 x i32]* %gep, @g
+  %gep = getelementptr [2 x i32], ptr @g, i64 -1
+  %cmp = icmp ugt ptr %gep, @g
   ret i1 %cmp
 }
 
 define i1 @global_gep_sgt_global_neg_offset() {
 ; CHECK-LABEL: @global_gep_sgt_global_neg_offset(
-; CHECK-NEXT:    ret i1 icmp sgt ([2 x i32]* getelementptr ([2 x i32], [2 x i32]* @g, i64 -1), [2 x i32]* @g)
+; CHECK-NEXT:    ret i1 icmp sgt (ptr getelementptr ([2 x i32], ptr @g, i64 -1), ptr @g)
 ;
-  %gep = getelementptr [2 x i32], [2 x i32]* @g, i64 -1
-  %cmp = icmp sgt [2 x i32]* %gep, @g
+  %gep = getelementptr [2 x i32], ptr @g, i64 -1
+  %cmp = icmp sgt ptr %gep, @g
   ret i1 %cmp
 }
 
@@ -252,20 +252,18 @@ define i1 @global_gep_ugt_global_gep() {
 ; CHECK-LABEL: @global_gep_ugt_global_gep(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %gep1 = getelementptr inbounds [2 x i32], [2 x i32]* @g, i64 0, i64 0
-  %gep2 = getelementptr inbounds [2 x i32], [2 x i32]* @g, i64 0, i64 1
-  %cmp = icmp ugt i32* %gep2, %gep1
+  %gep2 = getelementptr inbounds [2 x i32], ptr @g, i64 0, i64 1
+  %cmp = icmp ugt ptr %gep2, @g
   ret i1 %cmp
 }
 
 ; Should not fold due to signed comparison.
 define i1 @global_gep_sgt_global_gep() {
 ; CHECK-LABEL: @global_gep_sgt_global_gep(
-; CHECK-NEXT:    ret i1 icmp sgt (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @g, i64 0, i64 1), i32* getelementptr inbounds ([2 x i32], [2 x i32]* @g, i64 0, i64 0))
+; CHECK-NEXT:    ret i1 icmp sgt (ptr getelementptr inbounds ([2 x i32], ptr @g, i64 0, i64 1), ptr @g)
 ;
-  %gep1 = getelementptr inbounds [2 x i32], [2 x i32]* @g, i64 0, i64 0
-  %gep2 = getelementptr inbounds [2 x i32], [2 x i32]* @g, i64 0, i64 1
-  %cmp = icmp sgt i32* %gep2, %gep1
+  %gep2 = getelementptr inbounds [2 x i32], ptr @g, i64 0, i64 1
+  %cmp = icmp sgt ptr %gep2, @g
   ret i1 %cmp
 }
 
@@ -273,11 +271,7 @@ define i1 @global_gep_ugt_global_gep_complex() {
 ; CHECK-LABEL: @global_gep_ugt_global_gep_complex(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %gep1 = getelementptr inbounds [2 x i32], [2 x i32]* @g, i64 0, i64 0
-  %gep2 = getelementptr inbounds [2 x i32], [2 x i32]* @g, i64 0, i64 0
-  %gep2.cast = bitcast i32* %gep2 to i8*
-  %gep3 = getelementptr inbounds i8, i8* %gep2.cast, i64 2
-  %gep3.cast = bitcast i8* %gep3 to i32*
-  %cmp = icmp ugt i32* %gep3.cast, %gep1
+  %gep3 = getelementptr inbounds i8, ptr @g, i64 2
+  %cmp = icmp ugt ptr %gep3, @g
   ret i1 %cmp
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/loads.ll b/llvm/test/Transforms/InstSimplify/ConstProp/loads.ll
index 586a431b81c9d..a857ba3567424 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/loads.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/loads.ll
@@ -13,7 +13,7 @@ define i32 @test1() {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:    ret i32 -559038737
 ;
-  %r = load i32, i32* getelementptr ({{i32,i8},i32}, {{i32,i8},i32}* @g1, i32 0, i32 0, i32 0)
+  %r = load i32, ptr getelementptr ({{i32,i8},i32}, ptr @g1, i32 0, i32 0, i32 0)
   ret i32 %r
 }
 
@@ -26,7 +26,7 @@ define i16 @test2() {
 ; BE-LABEL: @test2(
 ; BE-NEXT:    ret i16 -8531
 ;
-  %r = load i16, i16* bitcast(i32* getelementptr ({{i32,i8},i32}, {{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16*)
+  %r = load i16, ptr getelementptr ({{i32,i8},i32}, ptr @g1, i32 0, i32 0, i32 0)
   ret i16 %r
 }
 
@@ -37,7 +37,7 @@ define i16 @test2_addrspacecast() {
 ; BE-LABEL: @test2_addrspacecast(
 ; BE-NEXT:    ret i16 -8531
 ;
-  %r = load i16, i16 addrspace(1)* addrspacecast(i32* getelementptr ({{i32,i8},i32}, {{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16 addrspace(1)*)
+  %r = load i16, ptr addrspace(1) addrspacecast(ptr getelementptr ({{i32,i8},i32}, ptr @g1, i32 0, i32 0, i32 0) to ptr addrspace(1))
   ret i16 %r
 }
 
@@ -49,7 +49,7 @@ define i16 @test3() {
 ; BE-LABEL: @test3(
 ; BE-NEXT:    ret i16 -16657
 ;
-  %r = load i16, i16* getelementptr(i16, i16* bitcast(i32* getelementptr ({{i32,i8},i32}, {{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16*), i32 1)
+  %r = load i16, ptr getelementptr(i16, ptr getelementptr ({{i32,i8},i32}, ptr @g1, i32 0, i32 0, i32 0), i32 1)
   ret i16 %r
 }
 
@@ -61,7 +61,7 @@ define i16 @test4() {
 ; BE-LABEL: @test4(
 ; BE-NEXT:    ret i16 -17920
 ;
-  %r = load i16, i16* getelementptr(i16, i16* bitcast(i32* getelementptr ({{i32,i8},i32}, {{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16*), i32 2)
+  %r = load i16, ptr getelementptr(i16, ptr getelementptr ({{i32,i8},i32}, ptr @g1, i32 0, i32 0, i32 0), i32 2)
   ret i16 %r
 }
 
@@ -70,7 +70,7 @@ define i64 @test6() {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT:    ret i64 4607182418800017408
 ;
-  %r = load i64, i64* bitcast(double* @g2 to i64*)
+  %r = load i64, ptr @g2
   ret i64 %r
 }
 
@@ -82,7 +82,7 @@ define i16 @test7() {
 ; BE-LABEL: @test7(
 ; BE-NEXT:    ret i16 16368
 ;
-  %r = load i16, i16* bitcast(double* @g2 to i16*)
+  %r = load i16, ptr @g2
   ret i16 %r
 }
 
@@ -94,7 +94,7 @@ define double @test8() {
 ; BE-LABEL: @test8(
 ; BE-NEXT:    ret double 0xDEADBEEFBA000000
 ;
-  %r = load double, double* bitcast({{i32,i8},i32}* @g1 to double*)
+  %r = load double, ptr @g1
   ret double %r
 }
 
@@ -107,7 +107,7 @@ define i128 @test_i128() {
 ; BE-LABEL: @test_i128(
 ; BE-NEXT:    ret i128 2268949521066387161080
 ;
-  %r = load i128, i128* bitcast({i64, i64}* @g3 to i128*)
+  %r = load i128, ptr @g3
   ret i128 %r
 }
 
@@ -118,7 +118,7 @@ define fp128 @test_fp128() {
 ; BE-LABEL: @test_fp128(
 ; BE-NEXT:    ret fp128 0xL0000000006B1BFF8000000000000007B
 ;
-  %r = load fp128, fp128* bitcast({i64, i64}* @g3 to fp128*)
+  %r = load fp128, ptr @g3
   ret fp128 %r
 }
 
@@ -129,7 +129,7 @@ define ppc_fp128 @test_ppc_fp128() {
 ; BE-LABEL: @test_ppc_fp128(
 ; BE-NEXT:    ret ppc_fp128 bitcast (i128 2268949521066387161080 to ppc_fp128)
 ;
-  %r = load ppc_fp128, ppc_fp128* bitcast({i64, i64}* @g3 to ppc_fp128*)
+  %r = load ppc_fp128, ptr @g3
   ret ppc_fp128 %r
 }
 
@@ -140,7 +140,7 @@ define x86_fp80 @test_x86_fp80() {
 ; BE-LABEL: @test_x86_fp80(
 ; BE-NEXT:    ret x86_fp80 0xK000000000000007B0000
 ;
-  %r = load x86_fp80, x86_fp80* bitcast({i64, i64}* @g3 to x86_fp80*)
+  %r = load x86_fp80, ptr @g3
   ret x86_fp80 %r
 }
 
@@ -151,7 +151,7 @@ define bfloat @test_bfloat() {
 ; BE-LABEL: @test_bfloat(
 ; BE-NEXT:    ret bfloat 0xR0000
 ;
-  %r = load bfloat, bfloat* bitcast({i64, i64}* @g3 to bfloat*)
+  %r = load bfloat, ptr @g3
   ret bfloat %r
 }
 
@@ -160,7 +160,7 @@ define <2 x i64> @test10() {
 ; CHECK-LABEL: @test10(
 ; CHECK-NEXT:    ret <2 x i64> <i64 123, i64 112312312>
 ;
-  %r = load <2 x i64>, <2 x i64>* bitcast({i64, i64}* @g3 to <2 x i64>*)
+  %r = load <2 x i64>, ptr @g3
   ret <2 x i64> %r
 }
 
@@ -179,7 +179,7 @@ define i16 @test11() nounwind {
 ; BE-NEXT:    ret i16 -24312
 ;
 entry:
-  %a = load i16, i16* bitcast ({ i8, i8 }* @g4 to i16*)
+  %a = load i16, ptr @g4
   ret i16 %a
 }
 
@@ -194,7 +194,7 @@ define i16 @test12() {
 ; BE-LABEL: @test12(
 ; BE-NEXT:    ret i16 25088
 ;
-  %a = load i16, i16* getelementptr inbounds ([3 x i16], [3 x i16]* bitcast ([6 x i8]* @test12g to [3 x i16]*), i32 0, i64 1)
+  %a = load i16, ptr getelementptr inbounds ([3 x i16], ptr @test12g, i32 0, i64 1)
   ret i16 %a
 }
 
@@ -205,30 +205,30 @@ define i1 @test13() {
 ; CHECK-LABEL: @test13(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %A = load i1, i1* bitcast (i8* @g5 to i1*)
+  %A = load i1, ptr @g5
   ret i1 %A
 }
 
-@g6 = constant [2 x i8*] [i8* inttoptr (i64 1 to i8*), i8* inttoptr (i64 2 to i8*)]
+@g6 = constant [2 x ptr] [ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 2 to ptr)]
 define i64 @test14() nounwind {
 ; CHECK-LABEL: @test14(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    ret i64 1
 ;
 entry:
-  %tmp = load i64, i64* bitcast ([2 x i8*]* @g6 to i64*)
+  %tmp = load i64, ptr @g6
   ret i64 %tmp
 }
 
 ; Check with address space pointers
-@g6_as1 = constant [2 x i8 addrspace(1)*] [i8 addrspace(1)* inttoptr (i16 1 to i8 addrspace(1)*), i8 addrspace(1)* inttoptr (i16 2 to i8 addrspace(1)*)]
+@g6_as1 = constant [2 x ptr addrspace(1)] [ptr addrspace(1) inttoptr (i16 1 to ptr addrspace(1)), ptr addrspace(1) inttoptr (i16 2 to ptr addrspace(1))]
 define i16 @test14_as1() nounwind {
 ; CHECK-LABEL: @test14_as1(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    ret i16 1
 ;
 entry:
-  %tmp = load i16, i16* bitcast ([2 x i8 addrspace(1)*]* @g6_as1 to i16*)
+  %tmp = load i16, ptr @g6_as1
   ret i16 %tmp
 }
 
@@ -238,16 +238,16 @@ define i64 @test15() nounwind {
 ; CHECK-NEXT:    ret i64 2
 ;
 entry:
-  %tmp = load i64, i64* bitcast (i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* @g6, i32 0, i64 1) to i64*)
+  %tmp = load i64, ptr getelementptr inbounds ([2 x ptr], ptr @g6, i32 0, i64 1)
   ret i64 %tmp
 }
 
-@gv7 = constant [4 x i8*] [i8* null, i8* inttoptr (i64 -14 to i8*), i8* null, i8* null]
+@gv7 = constant [4 x ptr] [ptr null, ptr inttoptr (i64 -14 to ptr), ptr null, ptr null]
 define i64 @test16.1() {
 ; CHECK-LABEL: @test16.1(
 ; CHECK-NEXT:    ret i64 0
 ;
-  %v = load i64, i64* bitcast ([4 x i8*]* @gv7 to i64*), align 8
+  %v = load i64, ptr @gv7, align 8
   ret i64 %v
 }
 
@@ -255,7 +255,7 @@ define i64 @test16.2() {
 ; CHECK-LABEL: @test16.2(
 ; CHECK-NEXT:    ret i64 -14
 ;
-  %v = load i64, i64* bitcast (i8** getelementptr inbounds ([4 x i8*], [4 x i8*]* @gv7, i64 0, i64 1) to i64*), align 8
+  %v = load i64, ptr getelementptr inbounds ([4 x ptr], ptr @gv7, i64 0, i64 1), align 8
   ret i64 %v
 }
 
@@ -263,18 +263,18 @@ define i64 @test16.3() {
 ; CHECK-LABEL: @test16.3(
 ; CHECK-NEXT:    ret i64 0
 ;
-  %v = load i64, i64* bitcast (i8** getelementptr inbounds ([4 x i8*], [4 x i8*]* @gv7, i64 0, i64 2) to i64*), align 8
+  %v = load i64, ptr getelementptr inbounds ([4 x ptr], ptr @gv7, i64 0, i64 2), align 8
   ret i64 %v
 }
 
-@g7 = constant {[0 x i32], [0 x i8], {}*} { [0 x i32] undef, [0 x i8] undef, {}* null }
+@g7 = constant {[0 x i32], [0 x i8], ptr} { [0 x i32] undef, [0 x i8] undef, ptr null }
 
-define i64* @test_leading_zero_size_elems() {
+define ptr @test_leading_zero_size_elems() {
 ; CHECK-LABEL: @test_leading_zero_size_elems(
-; CHECK-NEXT:    ret i64* null
+; CHECK-NEXT:    ret ptr null
 ;
-  %v = load i64*, i64** bitcast ({[0 x i32], [0 x i8], {}*}* @g7 to i64**)
-  ret i64* %v
+  %v = load ptr, ptr @g7
+  ret ptr %v
 }
 
 @g8 = constant {[4294967295 x [0 x i32]], i64} { [4294967295 x [0 x i32]] undef, i64 123 }
@@ -283,7 +283,7 @@ define i64 @test_leading_zero_size_elems_big() {
 ; CHECK-LABEL: @test_leading_zero_size_elems_big(
 ; CHECK-NEXT:    ret i64 123
 ;
-  %v = load i64, i64* bitcast ({[4294967295 x [0 x i32]], i64}* @g8 to i64*)
+  %v = load i64, ptr @g8
   ret i64 %v
 }
 
@@ -293,45 +293,45 @@ define i64 @test_array_of_zero_size_array() {
 ; CHECK-LABEL: @test_array_of_zero_size_array(
 ; CHECK-NEXT:    ret i64 undef
 ;
-  %v = load i64, i64* bitcast ([4294967295 x [0 x i32]]* @g9 to i64*)
+  %v = load i64, ptr @g9
   ret i64 %v
 }
 
 @g_undef = constant { i128 } undef
 
-define i32* @test_undef_aggregate() {
+define ptr @test_undef_aggregate() {
 ; CHECK-LABEL: @test_undef_aggregate(
-; CHECK-NEXT:    ret i32* undef
+; CHECK-NEXT:    ret ptr undef
 ;
-  %v = load i32*, i32** bitcast ({i128}* @g_undef to i32**)
-  ret i32* %v
+  %v = load ptr, ptr @g_undef
+  ret ptr %v
 }
 
 @g_poison = constant { i128 } poison
 
-define i32* @test_poison_aggregate() {
+define ptr @test_poison_aggregate() {
 ; CHECK-LABEL: @test_poison_aggregate(
-; CHECK-NEXT:    ret i32* poison
+; CHECK-NEXT:    ret ptr poison
 ;
-  %v = load i32*, i32** bitcast ({i128}* @g_poison to i32**)
-  ret i32* %v
+  %v = load ptr, ptr @g_poison
+  ret ptr %v
 }
 
 @g11 = constant <{ [8 x i8], [8 x i8] }> <{ [8 x i8] undef, [8 x i8] zeroinitializer }>, align 4
 
-define {}* @test_trailing_zero_gep_index() {
+define ptr @test_trailing_zero_gep_index() {
 ; CHECK-LABEL: @test_trailing_zero_gep_index(
-; CHECK-NEXT:    ret {}* null
+; CHECK-NEXT:    ret ptr null
 ;
-  %v = load {}*, {}** bitcast (i8* getelementptr inbounds (<{ [8 x i8], [8 x i8] }>, <{ [8 x i8], [8 x i8] }>* @g11, i32 0, i32 1, i32 0) to {}**), align 4
-  ret {}* %v
+  %v = load ptr, ptr getelementptr inbounds (<{ [8 x i8], [8 x i8] }>, ptr @g11, i32 0, i32 1, i32 0), align 4
+  ret ptr %v
 }
 
 define { i64, i64 } @test_load_struct() {
 ; CHECK-LABEL: @test_load_struct(
 ; CHECK-NEXT:    ret { i64, i64 } { i64 123, i64 112312312 }
 ;
-  %v = load { i64, i64 }, { i64, i64 }* @g3
+  %v = load { i64, i64 }, ptr @g3
   ret { i64, i64 } %v
 }
 
@@ -341,10 +341,10 @@ define { i64, i64 } @test_load_struct() {
 ; This should not try to create an x86_mmx null value.
 define x86_mmx @load_mmx() {
 ; CHECK-LABEL: @load_mmx(
-; CHECK-NEXT:    [[TEMP:%.*]] = load x86_mmx, x86_mmx* bitcast (i64* getelementptr ([2 x i64], [2 x i64]* @m64, i64 0, i64 ptrtoint (i32* @idx to i64)) to x86_mmx*), align 8
+; CHECK-NEXT:    [[TEMP:%.*]] = load x86_mmx, ptr getelementptr ([2 x i64], ptr @m64, i64 0, i64 ptrtoint (ptr @idx to i64)), align 8
 ; CHECK-NEXT:    ret x86_mmx [[TEMP]]
 ;
-  %temp = load x86_mmx, x86_mmx* bitcast (i64* getelementptr ([2 x i64], [2 x i64]* @m64, i64 0, i64 ptrtoint (i32* @idx to i64)) to x86_mmx*)
+  %temp = load x86_mmx, ptr getelementptr ([2 x i64], ptr @m64, i64 0, i64 ptrtoint (ptr @idx to i64))
   ret x86_mmx %temp
 }
 
@@ -356,7 +356,7 @@ define i8 @load_neg_one_at_unknown_offset() {
 ; CHECK-LABEL: @load_neg_one_at_unknown_offset(
 ; CHECK-NEXT:    ret i8 -1
 ;
-  %v = load i8, i8* getelementptr (<4 x i8>, <4 x i8>* @g_neg_one_vec, i64 0, i64 ptrtoint (i64* @g_offset to i64))
+  %v = load i8, ptr getelementptr (<4 x i8>, ptr @g_neg_one_vec, i64 0, i64 ptrtoint (ptr @g_offset to i64))
   ret i8 %v
 }
 
@@ -366,7 +366,7 @@ define i32 @load_padding() {
 ; CHECK-LABEL: @load_padding(
 ; CHECK-NEXT:    ret i32 undef
 ;
-  %v = load i32, i32* getelementptr (i32, i32* bitcast ({ i32, [4 x i8] }* @g_with_padding to i32*), i64 1)
+  %v = load i32, ptr getelementptr (i32, ptr @g_with_padding, i64 1)
   ret i32 %v
 }
 
@@ -377,30 +377,30 @@ define i32 @load_all_undef() {
 ; CHECK-LABEL: @load_all_undef(
 ; CHECK-NEXT:    ret i32 undef
 ;
-  %v = load i32, i32* getelementptr (i32, i32* bitcast ({ i32, [4 x i8] }* @g_all_undef to i32*), i64 1)
+  %v = load i32, ptr getelementptr (i32, ptr @g_all_undef, i64 1)
   ret i32 %v
 }
 
 @g_i8_data = constant [16 x i8] c"\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00"
 
-define i64* @load_ptr_from_i8_data() {
+define ptr @load_ptr_from_i8_data() {
 ; LE-LABEL: @load_ptr_from_i8_data(
-; LE-NEXT:    ret i64* inttoptr (i64 1 to i64*)
+; LE-NEXT:    ret ptr inttoptr (i64 1 to ptr)
 ;
 ; BE-LABEL: @load_ptr_from_i8_data(
-; BE-NEXT:    ret i64* inttoptr (i64 72057594037927936 to i64*)
+; BE-NEXT:    ret ptr inttoptr (i64 72057594037927936 to ptr)
 ;
-  %v = load i64*, i64** bitcast ([16 x i8]* @g_i8_data to i64**)
-  ret i64* %v
+  %v = load ptr, ptr @g_i8_data
+  ret ptr %v
 }
 
-define i64 addrspace(2)* @load_non_integral_ptr_from_i8_data() {
+define ptr addrspace(2) @load_non_integral_ptr_from_i8_data() {
 ; CHECK-LABEL: @load_non_integral_ptr_from_i8_data(
-; CHECK-NEXT:    [[V:%.*]] = load i64 addrspace(2)*, i64 addrspace(2)** bitcast ([16 x i8]* @g_i8_data to i64 addrspace(2)**), align 8
-; CHECK-NEXT:    ret i64 addrspace(2)* [[V]]
+; CHECK-NEXT:    [[V:%.*]] = load ptr addrspace(2), ptr @g_i8_data, align 8
+; CHECK-NEXT:    ret ptr addrspace(2) [[V]]
 ;
-  %v = load i64 addrspace(2)*, i64 addrspace(2)** bitcast ([16 x i8]* @g_i8_data to i64 addrspace(2)**)
-  ret i64 addrspace(2)* %v
+  %v = load ptr addrspace(2), ptr @g_i8_data
+  ret ptr addrspace(2) %v
 }
 
 @g_i1 = constant i1 true
@@ -409,6 +409,6 @@ define i8 @load_i8_from_i1() {
 ; CHECK-LABEL: @load_i8_from_i1(
 ; CHECK-NEXT:    ret i8 -1
 ;
-  %v = load i8, i8* bitcast (i1* @g_i1 to i8*)
+  %v = load i8, ptr @g_i1
   ret i8 %v
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/poison.ll b/llvm/test/Transforms/InstSimplify/ConstProp/poison.ll
index 0fe1858e533d7..49ca79554542f 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/poison.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/poison.ll
@@ -14,7 +14,7 @@ define void @casts() {
   %i4 = fptoui float poison to i8
   %i5 = fptosi float poison to i8
   %i6 = bitcast float poison to i32
-  %i7 = ptrtoint i8* poison to i8
+  %i7 = ptrtoint ptr poison to i8
   %f1 = fptrunc double poison to float
   %f2 = fpext half poison to float
   %f3 = uitofp i8 poison to float
@@ -25,12 +25,12 @@ define void @casts() {
 
 define void @casts2() {
 ; CHECK-LABEL: @casts2(
-; CHECK-NEXT:    call void (...) @use(i8* poison, i8* poison)
+; CHECK-NEXT:    call void (...) @use(ptr poison, ptr poison)
 ; CHECK-NEXT:    ret void
 ;
-  %p1 = inttoptr i8 poison to i8*
-  %p2 = addrspacecast i8 addrspace(1)* poison to i8*
-  call void (...) @use(i8* %p1, i8* %p2)
+  %p1 = inttoptr i8 poison to ptr
+  %p2 = addrspacecast ptr addrspace(1) poison to ptr
+  call void (...) @use(ptr %p1, ptr %p2)
   ret void
 }
 
@@ -104,14 +104,14 @@ define void @vec_aggr_ops() {
 
 define void @other_ops(i8 %x) {
 ; CHECK-LABEL: @other_ops(
-; CHECK-NEXT:    call void (...) @use(i1 poison, i1 poison, i8 poison, i8 poison, i8* poison, i8* poison)
+; CHECK-NEXT:    call void (...) @use(i1 poison, i1 poison, i8 poison, i8 poison, ptr poison, ptr poison)
 ; CHECK-NEXT:    ret void
 ;
   %i1 = icmp eq i8 poison, 1
   %i2 = fcmp oeq float poison, 1.0
   %i3 = select i1 poison, i8 1, i8 2
   %i4 = select i1 true, i8 poison, i8 %x
-  call void (...) @use(i1 %i1, i1 %i2, i8 %i3, i8 %i4, i8* getelementptr (i8, i8* poison, i64 1), i8* getelementptr inbounds (i8, i8* undef, i64 1))
+  call void (...) @use(i1 %i1, i1 %i2, i8 %i3, i8 %i4, ptr getelementptr (i8, ptr poison, i64 1), ptr getelementptr inbounds (i8, ptr undef, i64 1))
   ret void
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/shift.ll b/llvm/test/Transforms/InstSimplify/ConstProp/shift.ll
index 0fd0e8fa97be6..c6356a810bd2a 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/shift.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/shift.ll
@@ -1,69 +1,69 @@
 ; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
 
 ; CHECK-LABEL: shift_undef_64
-define void @shift_undef_64(i64* %p) {
+define void @shift_undef_64(ptr %p) {
   %r1 = lshr i64 -1, 4294967296 ; 2^32
   ; CHECK: store i64 poison
-  store i64 %r1, i64* %p
+  store i64 %r1, ptr %p
 
   %r2 = ashr i64 -1, 4294967297 ; 2^32 + 1
   ; CHECK: store i64 poison
-  store i64 %r2, i64* %p
+  store i64 %r2, ptr %p
 
   %r3 = shl i64 -1, 4294967298 ; 2^32 + 2
   ; CHECK: store i64 poison
-  store i64 %r3, i64* %p
+  store i64 %r3, ptr %p
 
   ret void
 }
 
 ; CHECK-LABEL: shift_undef_65
-define void @shift_undef_65(i65* %p) {
+define void @shift_undef_65(ptr %p) {
   %r1 = lshr i65 2, 18446744073709551617
   ; CHECK: store i65 poison
-  store i65 %r1, i65* %p
+  store i65 %r1, ptr %p
 
   %r2 = ashr i65 4, 18446744073709551617
   ; CHECK: store i65 poison
-  store i65 %r2, i65* %p
+  store i65 %r2, ptr %p
 
   %r3 = shl i65 1, 18446744073709551617
   ; CHECK: store i65 poison
-  store i65 %r3, i65* %p
+  store i65 %r3, ptr %p
 
   ret void
 }
 
 ; CHECK-LABEL: shift_undef_256
-define void @shift_undef_256(i256* %p) {
+define void @shift_undef_256(ptr %p) {
   %r1 = lshr i256 2, 18446744073709551617
   ; CHECK: store i256 poison
-  store i256 %r1, i256* %p
+  store i256 %r1, ptr %p
 
   %r2 = ashr i256 4, 18446744073709551618
   ; CHECK: store i256 poison
-  store i256 %r2, i256* %p
+  store i256 %r2, ptr %p
 
   %r3 = shl i256 1, 18446744073709551619
   ; CHECK: store i256 poison
-  store i256 %r3, i256* %p
+  store i256 %r3, ptr %p
 
   ret void
 }
 
 ; CHECK-LABEL: shift_undef_511
-define void @shift_undef_511(i511* %p) {
+define void @shift_undef_511(ptr %p) {
   %r1 = lshr i511 -1, 1208925819614629174706276 ; 2^80 + 100
   ; CHECK: store i511 poison
-  store i511 %r1, i511* %p
+  store i511 %r1, ptr %p
 
   %r2 = ashr i511 -2, 1208925819614629174706200
   ; CHECK: store i511 poison
-  store i511 %r2, i511* %p
+  store i511 %r2, ptr %p
 
   %r3 = shl i511 -3, 1208925819614629174706180
   ; CHECK: store i511 poison
-  store i511 %r3, i511* %p
+  store i511 %r3, ptr %p
 
   ret void
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/timeout.ll b/llvm/test/Transforms/InstSimplify/ConstProp/timeout.ll
index 0ebd365a0335a..00674bb7a3849 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/timeout.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/timeout.ll
@@ -3,33 +3,33 @@
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 target triple = "armv8-none-eabi"
 
-%struct.ST = type { %struct.ST* }
+%struct.ST = type { ptr }
 
 @global = internal global [121 x i8] zeroinitializer, align 1
 
 define void @func() #0 {
 ;CHECK-LABEL: func
 entry:
-  %s = alloca %struct.ST*, align 4
+  %s = alloca ptr, align 4
   %j = alloca i32, align 4
-  store %struct.ST* bitcast ([121 x i8]* @global to %struct.ST*), %struct.ST** %s, align 4
-  store i32 0, i32* %j, align 4
+  store ptr @global, ptr %s, align 4
+  store i32 0, ptr %j, align 4
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %0 = load i32, i32* %j, align 4
+  %0 = load i32, ptr %j, align 4
   %cmp = icmp slt i32 %0, 30
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %1 = load %struct.ST*, %struct.ST** %s, align 4
-  %2 = bitcast %struct.ST* %1 to i8*
-  %add.ptr = getelementptr inbounds i8, i8* %2, i32 4
-  %3 = ptrtoint i8* %add.ptr to i32
-  %4 = load %struct.ST*, %struct.ST** %s, align 4
-  %5 = bitcast %struct.ST* %4 to i8*
-  %add.ptr1 = getelementptr inbounds i8, i8* %5, i32 4
-  %6 = ptrtoint i8* %add.ptr1 to i32
+  %1 = load ptr, ptr %s, align 4
+  %2 = bitcast ptr %1 to ptr
+  %add.ptr = getelementptr inbounds i8, ptr %2, i32 4
+  %3 = ptrtoint ptr %add.ptr to i32
+  %4 = load ptr, ptr %s, align 4
+  %5 = bitcast ptr %4 to ptr
+  %add.ptr1 = getelementptr inbounds i8, ptr %5, i32 4
+  %6 = ptrtoint ptr %add.ptr1 to i32
   %rem = urem i32 %6, 2
   %cmp2 = icmp eq i32 %rem, 0
   br i1 %cmp2, label %cond.true, label %cond.false
@@ -38,36 +38,36 @@ cond.true:                                        ; preds = %for.body
   br label %cond.end
 
 cond.false:                                       ; preds = %for.body
-  %7 = load %struct.ST*, %struct.ST** %s, align 4
-  %8 = bitcast %struct.ST* %7 to i8*
-  %add.ptr3 = getelementptr inbounds i8, i8* %8, i32 4
-  %9 = ptrtoint i8* %add.ptr3 to i32
+  %7 = load ptr, ptr %s, align 4
+  %8 = bitcast ptr %7 to ptr
+  %add.ptr3 = getelementptr inbounds i8, ptr %8, i32 4
+  %9 = ptrtoint ptr %add.ptr3 to i32
   %rem4 = urem i32 %9, 2
   br label %cond.end
 
 cond.end:                                         ; preds = %cond.false, %cond.true
   %cond = phi i32 [ 0, %cond.true ], [ %rem4, %cond.false ]
   %add = add i32 %3, %cond
-  %10 = inttoptr i32 %add to %struct.ST*
-  %11 = load %struct.ST*, %struct.ST** %s, align 4
-  %next = getelementptr inbounds %struct.ST, %struct.ST* %11, i32 0, i32 0
-  store %struct.ST* %10, %struct.ST** %next, align 4
-  %12 = load %struct.ST*, %struct.ST** %s, align 4
-  %next5 = getelementptr inbounds %struct.ST, %struct.ST* %12, i32 0, i32 0
-  %13 = load %struct.ST*, %struct.ST** %next5, align 4
-  store %struct.ST* %13, %struct.ST** %s, align 4
+  %10 = inttoptr i32 %add to ptr
+  %11 = load ptr, ptr %s, align 4
+  %next = getelementptr inbounds %struct.ST, ptr %11, i32 0, i32 0
+  store ptr %10, ptr %next, align 4
+  %12 = load ptr, ptr %s, align 4
+  %next5 = getelementptr inbounds %struct.ST, ptr %12, i32 0, i32 0
+  %13 = load ptr, ptr %next5, align 4
+  store ptr %13, ptr %s, align 4
   br label %for.inc
 
 for.inc:                                          ; preds = %cond.end
-  %14 = load i32, i32* %j, align 4
+  %14 = load i32, ptr %j, align 4
   %inc = add nsw i32 %14, 1
-  store i32 %inc, i32* %j, align 4
+  store i32 %inc, ptr %j, align 4
   br label %for.cond
 
 for.end:                                          ; preds = %for.cond
-  %15 = load %struct.ST*, %struct.ST** %s, align 4
-  %next6 = getelementptr inbounds %struct.ST, %struct.ST* %15, i32 0, i32 0
-  store %struct.ST* null, %struct.ST** %next6, align 4
+  %15 = load ptr, ptr %s, align 4
+  %next6 = getelementptr inbounds %struct.ST, ptr %15, i32 0, i32 0
+  store ptr null, ptr %next6, align 4
   ret void
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/vectorgep-crash.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vectorgep-crash.ll
index 1562d74d83aae..2b6e4615e6cbc 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vectorgep-crash.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vectorgep-crash.ll
@@ -10,57 +10,57 @@ target triple = "x86_64-unknown-linux-gnu"
 %Partials.73 = type { [2 x %Dual.72] }
 
 ; Function Attrs: sspreq
-define <8 x i64*> @"julia_axpy!_65480"(%Dual* %arg1, <8 x i64> %arg2) {
+define <8 x ptr> @"julia_axpy!_65480"(ptr %arg1, <8 x i64> %arg2) {
 top:
-; CHECK: %VectorGep14 = getelementptr inbounds %Dual, %Dual* %arg1, <8 x i64> %arg2, i32 1, i32 0, i64 0, i32 1, i32 0, i64 0
-  %VectorGep14 = getelementptr inbounds %Dual, %Dual* %arg1, <8 x i64> %arg2, i32 1, i32 0, i64 0, i32 1, i32 0, i64 0
-  %0 = bitcast <8 x double*> %VectorGep14 to <8 x i64*>
-  ret <8 x i64*> %0
+; CHECK: %VectorGep14 = getelementptr inbounds %Dual, ptr %arg1, <8 x i64> %arg2, i32 1, i32 0, i64 0, i32 1, i32 0, i64 0
+  %VectorGep14 = getelementptr inbounds %Dual, ptr %arg1, <8 x i64> %arg2, i32 1, i32 0, i64 0, i32 1, i32 0, i64 0
+  %0 = bitcast <8 x ptr> %VectorGep14 to <8 x ptr>
+  ret <8 x ptr> %0
 }
 
-%struct.A = type { i32, %struct.B* }
-%struct.B = type { i64, %struct.C* }
+%struct.A = type { i32, ptr }
+%struct.B = type { i64, ptr }
 %struct.C = type { i64 }
 
 @G = internal global [65 x %struct.A] zeroinitializer, align 16
 ; CHECK-LABEL: @test
-; CHECK: ret <16 x i32*> getelementptr ([65 x %struct.A], [65 x %struct.A]* @G, <16 x i64> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16>, i32 0)
-define <16 x i32*> @test() {
+; CHECK: ret <16 x ptr> getelementptr ([65 x %struct.A], ptr @G, <16 x i64> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16>, i32 0)
+define <16 x ptr> @test() {
 vector.body:
-  %VectorGep = getelementptr [65 x %struct.A], [65 x %struct.A]* @G, <16 x i64> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16>, <16 x i32> zeroinitializer
-  ret <16 x i32*> %VectorGep
+  %VectorGep = getelementptr [65 x %struct.A], ptr @G, <16 x i64> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16>, <16 x i32> zeroinitializer
+  ret <16 x ptr> %VectorGep
 }
 
 ; CHECK-LABEL: @test2
-; CHECK: ret <16 x i32*> getelementptr ([65 x %struct.A], [65 x %struct.A]* @G, <16 x i64> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, 
-define <16 x i32*> @test2() {
+; CHECK: ret <16 x ptr> getelementptr ([65 x %struct.A], ptr @G, <16 x i64> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, 
+define <16 x ptr> @test2() {
 vector.body:
-  %VectorGep = getelementptr [65 x %struct.A], [65 x %struct.A]* @G, <16 x i32> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16>, <16 x i32> zeroinitializer
-  ret <16 x i32*> %VectorGep
+  %VectorGep = getelementptr [65 x %struct.A], ptr @G, <16 x i32> zeroinitializer, <16 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16>, <16 x i32> zeroinitializer
+  ret <16 x ptr> %VectorGep
 }
 
 @g = external global i8, align 1
 
-define <2 x i8*> @constant_zero_index() {
+define <2 x ptr> @constant_zero_index() {
 ; CHECK-LABEL: @constant_zero_index(
-; CHECK-NEXT:    ret <2 x i8*> <i8* @g, i8* @g>
+; CHECK-NEXT:    ret <2 x ptr> <ptr @g, ptr @g>
 ;
-  %gep = getelementptr i8, i8* @g, <2 x i64> zeroinitializer
-  ret <2 x i8*> %gep
+  %gep = getelementptr i8, ptr @g, <2 x i64> zeroinitializer
+  ret <2 x ptr> %gep
 }
 
-define <2 x i8*> @constant_undef_index() {
+define <2 x ptr> @constant_undef_index() {
 ; CHECK-LABEL: @constant_undef_index(
-; CHECK-NEXT:    ret <2 x i8*> <i8* @g, i8* @g>
+; CHECK-NEXT:    ret <2 x ptr> <ptr @g, ptr @g>
 ;
-  %gep = getelementptr i8, i8* @g, <2 x i64> undef
-  ret <2 x i8*> %gep
+  %gep = getelementptr i8, ptr @g, <2 x i64> undef
+  ret <2 x ptr> %gep
 }
 
-define <2 x i8*> @constant_inbounds() {
+define <2 x ptr> @constant_inbounds() {
 ; CHECK-LABEL: @constant_inbounds(
-; CHECK-NEXT:    ret <2 x i8*> getelementptr inbounds (i8, i8* @g, <2 x i64> <i64 1, i64 1>)
+; CHECK-NEXT:    ret <2 x ptr> getelementptr inbounds (i8, ptr @g, <2 x i64> <i64 1, i64 1>)
 ;
-  %gep = getelementptr i8, i8* @g, <2 x i64> <i64 1, i64 1>
-  ret <2 x i8*> %gep
+  %gep = getelementptr i8, ptr @g, <2 x i64> <i64 1, i64 1>
+  ret <2 x ptr> %gep
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/vscale-getelementptr.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vscale-getelementptr.ll
index 5d3d4a44c02c9..6d141acd239fd 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vscale-getelementptr.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vscale-getelementptr.ll
@@ -3,30 +3,30 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64"
 
-; CHECK-LABEL: define <4 x i32*> @fixed_length_version_first() {
-; CHECK-NEXT:  ret <4 x i32*> undef
-define <4 x i32*> @fixed_length_version_first() {
-  %ptr = getelementptr i32, <4 x i32*> undef, <4 x i64> undef
-  ret <4 x i32*> %ptr
+; CHECK-LABEL: define <4 x ptr> @fixed_length_version_first() {
+; CHECK-NEXT:  ret <4 x ptr> undef
+define <4 x ptr> @fixed_length_version_first() {
+  %ptr = getelementptr i32, <4 x ptr> undef, <4 x i64> undef
+  ret <4 x ptr> %ptr
 }
 
-; CHECK-LABEL: define <4 x <4 x i32>*> @fixed_length_version_second() {
-; CHECK-NEXT:  ret <4 x <4 x i32>*> undef
-define <4 x <4 x i32>*> @fixed_length_version_second() {
-  %ptr = getelementptr <4 x i32>, <4 x i32>* undef, <4 x i64> undef
-  ret <4 x <4 x i32>*> %ptr
+; CHECK-LABEL: define <4 x ptr> @fixed_length_version_second() {
+; CHECK-NEXT:  ret <4 x ptr> undef
+define <4 x ptr> @fixed_length_version_second() {
+  %ptr = getelementptr <4 x i32>, ptr undef, <4 x i64> undef
+  ret <4 x ptr> %ptr
 }
 
-; CHECK-LABEL: define <vscale x 4 x i32*> @vscale_version_first() {
-; CHECK-NEXT:  ret <vscale x 4 x i32*> undef
-define <vscale x 4 x i32*> @vscale_version_first() {
-  %ptr = getelementptr i32, <vscale x 4 x i32*> undef, <vscale x 4 x i64> undef
-  ret <vscale x 4 x i32*> %ptr
+; CHECK-LABEL: define <vscale x 4 x ptr> @vscale_version_first() {
+; CHECK-NEXT:  ret <vscale x 4 x ptr> undef
+define <vscale x 4 x ptr> @vscale_version_first() {
+  %ptr = getelementptr i32, <vscale x 4 x ptr> undef, <vscale x 4 x i64> undef
+  ret <vscale x 4 x ptr> %ptr
 }
 
-; CHECK-LABEL: define <vscale x 4 x <vscale x 4 x i32>*> @vscale_version_second() {
-; CHECK-NEXT:  ret <vscale x 4 x <vscale x 4 x i32>*> undef
-define <vscale x 4 x <vscale x 4 x i32>*> @vscale_version_second() {
-  %ptr = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* undef, <vscale x 4 x i64> undef
-  ret <vscale x 4 x <vscale x 4 x i32>*> %ptr
+; CHECK-LABEL: define <vscale x 4 x ptr> @vscale_version_second() {
+; CHECK-NEXT:  ret <vscale x 4 x ptr> undef
+define <vscale x 4 x ptr> @vscale_version_second() {
+  %ptr = getelementptr <vscale x 4 x i32>, ptr undef, <vscale x 4 x i64> undef
+  ret <vscale x 4 x ptr> %ptr
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/vscale-inseltpoison.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vscale-inseltpoison.ll
index 2f5f241bc968c..b388d37590153 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vscale-inseltpoison.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vscale-inseltpoison.ll
@@ -208,10 +208,10 @@ define <vscale x 4 x i32> @shufflevector() {
 
 define <vscale x 2 x double> @load() {
 ; CHECK-LABEL: @load(
-; CHECK-NEXT:    [[R:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* getelementptr (<vscale x 2 x double>, <vscale x 2 x double>* null, i64 1), align 16
+; CHECK-NEXT:    [[R:%.*]] = load <vscale x 2 x double>, ptr getelementptr (<vscale x 2 x double>, ptr null, i64 1), align 16
 ; CHECK-NEXT:    ret <vscale x 2 x double> [[R]]
 ;
-  %r = load <vscale x 2 x double>, <vscale x 2 x double>* getelementptr (<vscale x 2 x double>, <vscale x 2 x double>* null, i64 1)
+  %r = load <vscale x 2 x double>, ptr getelementptr (<vscale x 2 x double>, ptr null, i64 1)
   ret <vscale x 2 x double> %r
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/ConstProp/vscale.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vscale.ll
index a57ec3dd35017..7ef6f8f607695 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vscale.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vscale.ll
@@ -208,10 +208,10 @@ define <vscale x 4 x i32> @shufflevector() {
 
 define <vscale x 2 x double> @load() {
 ; CHECK-LABEL: @load(
-; CHECK-NEXT:    [[R:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* getelementptr (<vscale x 2 x double>, <vscale x 2 x double>* null, i64 1), align 16
+; CHECK-NEXT:    [[R:%.*]] = load <vscale x 2 x double>, ptr getelementptr (<vscale x 2 x double>, ptr null, i64 1), align 16
 ; CHECK-NEXT:    ret <vscale x 2 x double> [[R]]
 ;
-  %r = load <vscale x 2 x double>, <vscale x 2 x double>* getelementptr (<vscale x 2 x double>, <vscale x 2 x double>* null, i64 1)
+  %r = load <vscale x 2 x double>, ptr getelementptr (<vscale x 2 x double>, ptr null, i64 1)
   ret <vscale x 2 x double> %r
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/add-mask.ll b/llvm/test/Transforms/InstSimplify/add-mask.ll
index 0669a086d07cd..e4df3ac7351db 100644
--- a/llvm/test/Transforms/InstSimplify/add-mask.ll
+++ b/llvm/test/Transforms/InstSimplify/add-mask.ll
@@ -70,13 +70,13 @@ declare void @llvm.assume(i1)
 ; Known bits without a constant
 define i1 @test4(i32 %a) {
 ; CHECK-LABEL: @test4(
-; CHECK-NEXT:    [[B:%.*]] = load i32, i32* @B, align 4
+; CHECK-NEXT:    [[B:%.*]] = load i32, ptr @B, align 4
 ; CHECK-NEXT:    [[B_AND:%.*]] = and i32 [[B]], 1
 ; CHECK-NEXT:    [[B_CND:%.*]] = icmp eq i32 [[B_AND]], 1
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[B_CND]])
 ; CHECK-NEXT:    ret i1 false
 ;
-  %b = load i32, i32* @B
+  %b = load i32, ptr @B
   %b.and = and i32 %b, 1
   %b.cnd = icmp eq i32 %b.and, 1
   call void @llvm.assume(i1 %b.cnd)

diff  --git a/llvm/test/Transforms/InstSimplify/and-or-icmp-nullptr.ll b/llvm/test/Transforms/InstSimplify/and-or-icmp-nullptr.ll
index e5da52801aad3..ae8e38759d571 100644
--- a/llvm/test/Transforms/InstSimplify/and-or-icmp-nullptr.ll
+++ b/llvm/test/Transforms/InstSimplify/and-or-icmp-nullptr.ll
@@ -16,57 +16,57 @@
 ;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define i1 @ugt_and_min(i8* %x, i8* %y)  {
+define i1 @ugt_and_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ugt_and_min(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = icmp ugt i8* %x, %y
-  %cmpeq = icmp eq i8* %x, null
+  %cmp = icmp ugt ptr %x, %y
+  %cmpeq = icmp eq ptr %x, null
   %r = and i1 %cmp, %cmpeq
   ret i1 %r
 }
 
-define i1 @ugt_and_min_commute(<2 x i8>* %x, <2 x i8>* %y)  {
+define i1 @ugt_and_min_commute(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ugt_and_min_commute(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = icmp ugt <2 x i8>* %x, %y
-  %cmpeq = icmp eq <2 x i8>* %x, null
+  %cmp = icmp ugt ptr %x, %y
+  %cmpeq = icmp eq ptr %x, null
   %r = and i1 %cmpeq, %cmp
   ret i1 %r
 }
 
-define i1 @ugt_swap_and_min(i8* %x, i8* %y)  {
+define i1 @ugt_swap_and_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ugt_swap_and_min(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = icmp ult i8* %y, %x
-  %cmpeq = icmp eq i8* %x, null
+  %cmp = icmp ult ptr %y, %x
+  %cmpeq = icmp eq ptr %x, null
   %r = and i1 %cmp, %cmpeq
   ret i1 %r
 }
 
-define i1 @ugt_swap_and_min_commute(i8* %x, i8* %y)  {
+define i1 @ugt_swap_and_min_commute(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ugt_swap_and_min_commute(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = icmp ult i8* %y, %x
-  %cmpeq = icmp eq i8* %x, null
+  %cmp = icmp ult ptr %y, %x
+  %cmpeq = icmp eq ptr %x, null
   %r = and i1 %cmpeq, %cmp
   ret i1 %r
 }
 
 ; Negative test - signed compare
 
-define i1 @sgt_and_min(i9* %x, i9* %y)  {
+define i1 @sgt_and_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @sgt_and_min(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i9* [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq i9* [[X]], null
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt ptr [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq ptr [[X]], null
 ; CHECK-NEXT:    [[R:%.*]] = and i1 [[CMP]], [[CMPEQ]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %cmp = icmp sgt i9* %x, %y
-  %cmpeq = icmp eq i9* %x, null
+  %cmp = icmp sgt ptr %x, %y
+  %cmpeq = icmp eq ptr %x, null
   %r = and i1 %cmp, %cmpeq
   ret i1 %r
 }
@@ -77,57 +77,57 @@ define i1 @sgt_and_min(i9* %x, i9* %y)  {
 ;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define i1 @ule_or_not_min(i427* %x, i427* %y)  {
+define i1 @ule_or_not_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ule_or_not_min(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %cmp = icmp ule i427* %x, %y
-  %cmpeq = icmp ne i427* %x, null
+  %cmp = icmp ule ptr %x, %y
+  %cmpeq = icmp ne ptr %x, null
   %r = or i1 %cmp, %cmpeq
   ret i1 %r
 }
 
-define i1 @ule_or_not_min_commute(<3 x i9>* %x, <3 x i9>* %y)  {
+define i1 @ule_or_not_min_commute(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ule_or_not_min_commute(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %cmp = icmp ule <3 x i9>* %x, %y
-  %cmpeq = icmp ne <3 x i9>* %x, null
+  %cmp = icmp ule ptr %x, %y
+  %cmpeq = icmp ne ptr %x, null
   %r = or i1 %cmpeq, %cmp
   ret i1 %r
 }
 
-define i1 @ule_swap_or_not_min(i8* %x, i8* %y)  {
+define i1 @ule_swap_or_not_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ule_swap_or_not_min(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %cmp = icmp uge i8* %y, %x
-  %cmpeq = icmp ne i8* %x, null
+  %cmp = icmp uge ptr %y, %x
+  %cmpeq = icmp ne ptr %x, null
   %r = or i1 %cmp, %cmpeq
   ret i1 %r
 }
 
-define i1 @ule_swap_or_not_min_commute(i8* %x, i8* %y)  {
+define i1 @ule_swap_or_not_min_commute(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ule_swap_or_not_min_commute(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %cmp = icmp uge i8* %y, %x
-  %cmpeq = icmp ne i8* %x, null
+  %cmp = icmp uge ptr %y, %x
+  %cmpeq = icmp ne ptr %x, null
   %r = or i1 %cmpeq, %cmp
   ret i1 %r
 }
 
 ; Negative test - signed compare
 
-define i1 @sle_or_not_min(i427* %x, i427* %y)  {
+define i1 @sle_or_not_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @sle_or_not_min(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp sle i427* [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne i427* [[X]], null
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sle ptr [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne ptr [[X]], null
 ; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP]], [[CMPEQ]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %cmp = icmp sle i427* %x, %y
-  %cmpeq = icmp ne i427* %x, null
+  %cmp = icmp sle ptr %x, %y
+  %cmpeq = icmp ne ptr %x, null
   %r = or i1 %cmp, %cmpeq
   ret i1 %r
 }
@@ -138,61 +138,61 @@ define i1 @sle_or_not_min(i427* %x, i427* %y)  {
 ;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define i1 @ule_and_min(i8* %x, i8* %y)  {
+define i1 @ule_and_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ule_and_min(
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
 ; CHECK-NEXT:    ret i1 [[CMPEQ]]
 ;
-  %cmp = icmp ule i8* %x, %y
-  %cmpeq = icmp eq i8* %x, null
+  %cmp = icmp ule ptr %x, %y
+  %cmpeq = icmp eq ptr %x, null
   %r = and i1 %cmp, %cmpeq
   ret i1 %r
 }
 
-define i1 @ule_and_min_commute(i8* %x, i8* %y)  {
+define i1 @ule_and_min_commute(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ule_and_min_commute(
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
 ; CHECK-NEXT:    ret i1 [[CMPEQ]]
 ;
-  %cmp = icmp ule i8* %x, %y
-  %cmpeq = icmp eq i8* %x, null
+  %cmp = icmp ule ptr %x, %y
+  %cmpeq = icmp eq ptr %x, null
   %r = and i1 %cmpeq, %cmp
   ret i1 %r
 }
 
-define i1 @ule_swap_and_min(i8* %x, i8* %y)  {
+define i1 @ule_swap_and_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ule_swap_and_min(
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
 ; CHECK-NEXT:    ret i1 [[CMPEQ]]
 ;
-  %cmp = icmp uge i8* %y, %x
-  %cmpeq = icmp eq i8* %x, null
+  %cmp = icmp uge ptr %y, %x
+  %cmpeq = icmp eq ptr %x, null
   %r = and i1 %cmp, %cmpeq
   ret i1 %r
 }
 
-define i1 @ule_swap_and_min_commute(i8* %x, i8* %y)  {
+define i1 @ule_swap_and_min_commute(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ule_swap_and_min_commute(
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq i8* [[X:%.*]], null
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq ptr [[X:%.*]], null
 ; CHECK-NEXT:    ret i1 [[CMPEQ]]
 ;
-  %cmp = icmp uge i8* %y, %x
-  %cmpeq = icmp eq i8* %x, null
+  %cmp = icmp uge ptr %y, %x
+  %cmpeq = icmp eq ptr %x, null
   %r = and i1 %cmpeq, %cmp
   ret i1 %r
 }
 
 ; Negative test - signed compare
 
-define i1 @sle_and_min(i8* %x, i8* %y)  {
+define i1 @sle_and_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @sle_and_min(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp sle i8* [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq i8* [[X]], null
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sle ptr [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq ptr [[X]], null
 ; CHECK-NEXT:    [[R:%.*]] = and i1 [[CMP]], [[CMPEQ]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %cmp = icmp sle i8* %x, %y
-  %cmpeq = icmp eq i8* %x, null
+  %cmp = icmp sle ptr %x, %y
+  %cmpeq = icmp eq ptr %x, null
   %r = and i1 %cmp, %cmpeq
   ret i1 %r
 }
@@ -203,61 +203,61 @@ define i1 @sle_and_min(i8* %x, i8* %y)  {
 ;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define i1 @ule_or_min(i8* %x, i8* %y)  {
+define i1 @ule_or_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ule_or_min(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ule i8* [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ule ptr [[X:%.*]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %cmp = icmp ule i8* %x, %y
-  %cmpeq = icmp eq i8* %x, null
+  %cmp = icmp ule ptr %x, %y
+  %cmpeq = icmp eq ptr %x, null
   %r = or i1 %cmp, %cmpeq
   ret i1 %r
 }
 
-define i1 @ule_or_min_commute(i8* %x, i8* %y)  {
+define i1 @ule_or_min_commute(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ule_or_min_commute(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ule i8* [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ule ptr [[X:%.*]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %cmp = icmp ule i8* %x, %y
-  %cmpeq = icmp eq i8* %x, null
+  %cmp = icmp ule ptr %x, %y
+  %cmpeq = icmp eq ptr %x, null
   %r = or i1 %cmpeq, %cmp
   ret i1 %r
 }
 
-define i1 @ule_swap_or_min(i8* %x, i8* %y)  {
+define i1 @ule_swap_or_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ule_swap_or_min(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp uge i8* [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp uge ptr [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %cmp = icmp uge i8* %y, %x
-  %cmpeq = icmp eq i8* %x, null
+  %cmp = icmp uge ptr %y, %x
+  %cmpeq = icmp eq ptr %x, null
   %r = or i1 %cmp, %cmpeq
   ret i1 %r
 }
 
-define i1 @ule_swap_or_min_commute(i8* %x, i8* %y)  {
+define i1 @ule_swap_or_min_commute(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ule_swap_or_min_commute(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp uge i8* [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp uge ptr [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %cmp = icmp uge i8* %y, %x
-  %cmpeq = icmp eq i8* %x, null
+  %cmp = icmp uge ptr %y, %x
+  %cmpeq = icmp eq ptr %x, null
   %r = or i1 %cmpeq, %cmp
   ret i1 %r
 }
 
 ; Negative test - signed compare
 
-define i1 @sle_or_min(i8* %x, i8* %y)  {
+define i1 @sle_or_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @sle_or_min(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp sle i8* [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq i8* [[X]], null
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sle ptr [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp eq ptr [[X]], null
 ; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP]], [[CMPEQ]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %cmp = icmp sle i8* %x, %y
-  %cmpeq = icmp eq i8* %x, null
+  %cmp = icmp sle ptr %x, %y
+  %cmpeq = icmp eq ptr %x, null
   %r = or i1 %cmp, %cmpeq
   ret i1 %r
 }
@@ -268,61 +268,61 @@ define i1 @sle_or_min(i8* %x, i8* %y)  {
 ;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define i1 @ugt_and_not_min(i8* %x, i8* %y)  {
+define i1 @ugt_and_not_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ugt_and_not_min(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8* [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt ptr [[X:%.*]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %cmp = icmp ugt i8* %x, %y
-  %cmpeq = icmp ne i8* %x, null
+  %cmp = icmp ugt ptr %x, %y
+  %cmpeq = icmp ne ptr %x, null
   %r = and i1 %cmp, %cmpeq
   ret i1 %r
 }
 
-define i1 @ugt_and_not_min_commute(i8* %x, i8* %y)  {
+define i1 @ugt_and_not_min_commute(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ugt_and_not_min_commute(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8* [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt ptr [[X:%.*]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %cmp = icmp ugt i8* %x, %y
-  %cmpeq = icmp ne i8* %x, null
+  %cmp = icmp ugt ptr %x, %y
+  %cmpeq = icmp ne ptr %x, null
   %r = and i1 %cmpeq, %cmp
   ret i1 %r
 }
 
-define i1 @ugt_swap_and_not_min(i8* %x, i8* %y)  {
+define i1 @ugt_swap_and_not_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ugt_swap_and_not_min(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8* [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult ptr [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %cmp = icmp ult i8* %y, %x
-  %cmpeq = icmp ne i8* %x, null
+  %cmp = icmp ult ptr %y, %x
+  %cmpeq = icmp ne ptr %x, null
   %r = and i1 %cmp, %cmpeq
   ret i1 %r
 }
 
-define i1 @ugt_swap_and_not_min_commute(i8* %x, i8* %y)  {
+define i1 @ugt_swap_and_not_min_commute(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ugt_swap_and_not_min_commute(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8* [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult ptr [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %cmp = icmp ult i8* %y, %x
-  %cmpeq = icmp ne i8* %x, null
+  %cmp = icmp ult ptr %y, %x
+  %cmpeq = icmp ne ptr %x, null
   %r = and i1 %cmpeq, %cmp
   ret i1 %r
 }
 
 ; Negative test - signed compare
 
-define i1 @sgt_and_not_min(i8* %x, i8* %y)  {
+define i1 @sgt_and_not_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @sgt_and_not_min(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8* [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne i8* [[X]], null
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt ptr [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne ptr [[X]], null
 ; CHECK-NEXT:    [[R:%.*]] = and i1 [[CMP]], [[CMPEQ]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %cmp = icmp sgt i8* %x, %y
-  %cmpeq = icmp ne i8* %x, null
+  %cmp = icmp sgt ptr %x, %y
+  %cmpeq = icmp ne ptr %x, null
   %r = and i1 %cmp, %cmpeq
   ret i1 %r
 }
@@ -333,61 +333,61 @@ define i1 @sgt_and_not_min(i8* %x, i8* %y)  {
 ;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define i1 @ugt_or_not_min(i8* %x, i8* %y)  {
+define i1 @ugt_or_not_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ugt_or_not_min(
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne i8* [[X:%.*]], null
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
 ; CHECK-NEXT:    ret i1 [[CMPEQ]]
 ;
-  %cmp = icmp ugt i8* %x, %y
-  %cmpeq = icmp ne i8* %x, null
+  %cmp = icmp ugt ptr %x, %y
+  %cmpeq = icmp ne ptr %x, null
   %r = or i1 %cmp, %cmpeq
   ret i1 %r
 }
 
-define i1 @ugt_or_not_min_commute(i8* %x, i8* %y)  {
+define i1 @ugt_or_not_min_commute(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ugt_or_not_min_commute(
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne i8* [[X:%.*]], null
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
 ; CHECK-NEXT:    ret i1 [[CMPEQ]]
 ;
-  %cmp = icmp ugt i8* %x, %y
-  %cmpeq = icmp ne i8* %x, null
+  %cmp = icmp ugt ptr %x, %y
+  %cmpeq = icmp ne ptr %x, null
   %r = or i1 %cmpeq, %cmp
   ret i1 %r
 }
 
-define i1 @ugt_swap_or_not_min(i8* %x, i8* %y)  {
+define i1 @ugt_swap_or_not_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ugt_swap_or_not_min(
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne i8* [[X:%.*]], null
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
 ; CHECK-NEXT:    ret i1 [[CMPEQ]]
 ;
-  %cmp = icmp ult i8* %y, %x
-  %cmpeq = icmp ne i8* %x, null
+  %cmp = icmp ult ptr %y, %x
+  %cmpeq = icmp ne ptr %x, null
   %r = or i1 %cmp, %cmpeq
   ret i1 %r
 }
 
-define i1 @ugt_swap_or_not_min_commute(i823* %x, i823* %y)  {
+define i1 @ugt_swap_or_not_min_commute(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @ugt_swap_or_not_min_commute(
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne i823* [[X:%.*]], null
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne ptr [[X:%.*]], null
 ; CHECK-NEXT:    ret i1 [[CMPEQ]]
 ;
-  %cmp = icmp ult i823* %y, %x
-  %cmpeq = icmp ne i823* %x, null
+  %cmp = icmp ult ptr %y, %x
+  %cmpeq = icmp ne ptr %x, null
   %r = or i1 %cmpeq, %cmp
   ret i1 %r
 }
 
 ; Negative test - signed compare
 
-define i1 @sgt_or_not_min(i8* %x, i8* %y)  {
+define i1 @sgt_or_not_min(ptr %x, ptr %y)  {
 ; CHECK-LABEL: @sgt_or_not_min(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8* [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne i8* [[X]], null
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt ptr [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMPEQ:%.*]] = icmp ne ptr [[X]], null
 ; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP]], [[CMPEQ]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %cmp = icmp sgt i8* %x, %y
-  %cmpeq = icmp ne i8* %x, null
+  %cmp = icmp sgt ptr %x, %y
+  %cmpeq = icmp ne ptr %x, null
   %r = or i1 %cmp, %cmpeq
   ret i1 %r
 }

diff  --git a/llvm/test/Transforms/InstSimplify/and-or-icmp-zero.ll b/llvm/test/Transforms/InstSimplify/and-or-icmp-zero.ll
index 15cfea4108e45..eb1d05d1822ce 100644
--- a/llvm/test/Transforms/InstSimplify/and-or-icmp-zero.ll
+++ b/llvm/test/Transforms/InstSimplify/and-or-icmp-zero.ll
@@ -128,15 +128,15 @@ define i1 @and_cmps_eq_zero_with_mask_commute4(i64 %x, i64 %y) {
 
 ; or (icmp eq (and (ptrtoint P), ?), 0), (icmp eq P, 0) --> icmp eq (and (ptrtoint P), ?), 0
 
-define i1 @or_cmps_ptr_eq_zero_with_mask_commute1(i64* %p, i64 %y) {
+define i1 @or_cmps_ptr_eq_zero_with_mask_commute1(ptr %p, i64 %y) {
 ; CHECK-LABEL: @or_cmps_ptr_eq_zero_with_mask_commute1(
-; CHECK-NEXT:    [[X:%.*]] = ptrtoint i64* [[P:%.*]] to i64
+; CHECK-NEXT:    [[X:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; CHECK-NEXT:    [[SOMEBITS:%.*]] = and i64 [[X]], [[Y:%.*]]
 ; CHECK-NEXT:    [[SOMEBITS_ARE_ZERO:%.*]] = icmp eq i64 [[SOMEBITS]], 0
 ; CHECK-NEXT:    ret i1 [[SOMEBITS_ARE_ZERO]]
 ;
-  %isnull = icmp eq i64* %p, null
-  %x = ptrtoint i64* %p to i64
+  %isnull = icmp eq ptr %p, null
+  %x = ptrtoint ptr %p to i64
   %somebits = and i64 %x, %y
   %somebits_are_zero = icmp eq i64 %somebits, 0
   %r = or i1 %somebits_are_zero, %isnull
@@ -145,15 +145,15 @@ define i1 @or_cmps_ptr_eq_zero_with_mask_commute1(i64* %p, i64 %y) {
 
 ; or (icmp eq P, 0), (icmp eq (and (ptrtoint P), ?), 0) --> icmp eq (and (ptrtoint P), ?), 0
 
-define <2 x i1> @or_cmps_ptr_eq_zero_with_mask_commute2(<2 x i64*> %p, <2 x i64> %y) {
+define <2 x i1> @or_cmps_ptr_eq_zero_with_mask_commute2(<2 x ptr> %p, <2 x i64> %y) {
 ; CHECK-LABEL: @or_cmps_ptr_eq_zero_with_mask_commute2(
-; CHECK-NEXT:    [[X:%.*]] = ptrtoint <2 x i64*> [[P:%.*]] to <2 x i64>
+; CHECK-NEXT:    [[X:%.*]] = ptrtoint <2 x ptr> [[P:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[SOMEBITS:%.*]] = and <2 x i64> [[X]], [[Y:%.*]]
 ; CHECK-NEXT:    [[SOMEBITS_ARE_ZERO:%.*]] = icmp eq <2 x i64> [[SOMEBITS]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[SOMEBITS_ARE_ZERO]]
 ;
-  %isnull = icmp eq <2 x i64*> %p, zeroinitializer
-  %x = ptrtoint <2 x i64*> %p to <2 x i64>
+  %isnull = icmp eq <2 x ptr> %p, zeroinitializer
+  %x = ptrtoint <2 x ptr> %p to <2 x i64>
   %somebits = and <2 x i64> %x, %y
   %somebits_are_zero = icmp eq <2 x i64> %somebits, zeroinitializer
   %r = or <2 x i1> %isnull, %somebits_are_zero
@@ -162,15 +162,15 @@ define <2 x i1> @or_cmps_ptr_eq_zero_with_mask_commute2(<2 x i64*> %p, <2 x i64>
 
 ; or (icmp eq (and ?, (ptrtoint P)), 0), (icmp eq P, 0) --> icmp eq (and ?, (ptrtoint P)), 0
 
-define i1 @or_cmps_ptr_eq_zero_with_mask_commute3(i4* %p, i4 %y) {
+define i1 @or_cmps_ptr_eq_zero_with_mask_commute3(ptr %p, i4 %y) {
 ; CHECK-LABEL: @or_cmps_ptr_eq_zero_with_mask_commute3(
-; CHECK-NEXT:    [[X:%.*]] = ptrtoint i4* [[P:%.*]] to i4
+; CHECK-NEXT:    [[X:%.*]] = ptrtoint ptr [[P:%.*]] to i4
 ; CHECK-NEXT:    [[SOMEBITS:%.*]] = and i4 [[Y:%.*]], [[X]]
 ; CHECK-NEXT:    [[SOMEBITS_ARE_ZERO:%.*]] = icmp eq i4 [[SOMEBITS]], 0
 ; CHECK-NEXT:    ret i1 [[SOMEBITS_ARE_ZERO]]
 ;
-  %isnull = icmp eq i4* %p, null
-  %x = ptrtoint i4* %p to i4
+  %isnull = icmp eq ptr %p, null
+  %x = ptrtoint ptr %p to i4
   %somebits = and i4 %y, %x
   %somebits_are_zero = icmp eq i4 %somebits, 0
   %r = or i1 %somebits_are_zero, %isnull
@@ -179,15 +179,15 @@ define i1 @or_cmps_ptr_eq_zero_with_mask_commute3(i4* %p, i4 %y) {
 
 ; or (icmp eq P, 0), (icmp eq (and ?, (ptrtoint P)), 0) --> icmp eq (and ?, (ptrtoint P)), 0
 
-define <2 x i1> @or_cmps_ptr_eq_zero_with_mask_commute4(<2 x i4*> %p, <2 x i4> %y) {
+define <2 x i1> @or_cmps_ptr_eq_zero_with_mask_commute4(<2 x ptr> %p, <2 x i4> %y) {
 ; CHECK-LABEL: @or_cmps_ptr_eq_zero_with_mask_commute4(
-; CHECK-NEXT:    [[X:%.*]] = ptrtoint <2 x i4*> [[P:%.*]] to <2 x i4>
+; CHECK-NEXT:    [[X:%.*]] = ptrtoint <2 x ptr> [[P:%.*]] to <2 x i4>
 ; CHECK-NEXT:    [[SOMEBITS:%.*]] = and <2 x i4> [[Y:%.*]], [[X]]
 ; CHECK-NEXT:    [[SOMEBITS_ARE_ZERO:%.*]] = icmp eq <2 x i4> [[SOMEBITS]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[SOMEBITS_ARE_ZERO]]
 ;
-  %isnull = icmp eq <2 x i4*> %p, zeroinitializer
-  %x = ptrtoint <2 x i4*> %p to <2 x i4>
+  %isnull = icmp eq <2 x ptr> %p, zeroinitializer
+  %x = ptrtoint <2 x ptr> %p to <2 x i4>
   %somebits = and <2 x i4> %y, %x
   %somebits_are_zero = icmp eq <2 x i4> %somebits, zeroinitializer
   %r = or <2 x i1> %isnull, %somebits_are_zero
@@ -196,15 +196,15 @@ define <2 x i1> @or_cmps_ptr_eq_zero_with_mask_commute4(<2 x i4*> %p, <2 x i4> %
 
 ; and (icmp ne (and (ptrtoint P), ?), 0), (icmp ne P, 0) --> icmp ne (and (ptrtoint P), ?), 0
 
-define <3 x i1> @and_cmps_ptr_eq_zero_with_mask_commute1(<3 x i4*> %p, <3 x i4> %y) {
+define <3 x i1> @and_cmps_ptr_eq_zero_with_mask_commute1(<3 x ptr> %p, <3 x i4> %y) {
 ; CHECK-LABEL: @and_cmps_ptr_eq_zero_with_mask_commute1(
-; CHECK-NEXT:    [[X:%.*]] = ptrtoint <3 x i4*> [[P:%.*]] to <3 x i4>
+; CHECK-NEXT:    [[X:%.*]] = ptrtoint <3 x ptr> [[P:%.*]] to <3 x i4>
 ; CHECK-NEXT:    [[SOMEBITS:%.*]] = and <3 x i4> [[X]], [[Y:%.*]]
 ; CHECK-NEXT:    [[SOMEBITS_ARE_NOT_ZERO:%.*]] = icmp ne <3 x i4> [[SOMEBITS]], zeroinitializer
 ; CHECK-NEXT:    ret <3 x i1> [[SOMEBITS_ARE_NOT_ZERO]]
 ;
-  %isnotnull = icmp ne <3 x i4*> %p, zeroinitializer
-  %x = ptrtoint <3 x i4*> %p to <3 x i4>
+  %isnotnull = icmp ne <3 x ptr> %p, zeroinitializer
+  %x = ptrtoint <3 x ptr> %p to <3 x i4>
   %somebits = and <3 x i4> %x, %y
   %somebits_are_not_zero = icmp ne <3 x i4> %somebits, zeroinitializer
   %r = and <3 x i1> %somebits_are_not_zero, %isnotnull
@@ -213,15 +213,15 @@ define <3 x i1> @and_cmps_ptr_eq_zero_with_mask_commute1(<3 x i4*> %p, <3 x i4>
 
 ; and (icmp ne P, 0), (icmp ne (and (ptrtoint P), ?), 0) --> icmp ne (and (ptrtoint P), ?), 0
 
-define i1 @and_cmps_ptr_eq_zero_with_mask_commute2(i4* %p, i4 %y) {
+define i1 @and_cmps_ptr_eq_zero_with_mask_commute2(ptr %p, i4 %y) {
 ; CHECK-LABEL: @and_cmps_ptr_eq_zero_with_mask_commute2(
-; CHECK-NEXT:    [[X:%.*]] = ptrtoint i4* [[P:%.*]] to i4
+; CHECK-NEXT:    [[X:%.*]] = ptrtoint ptr [[P:%.*]] to i4
 ; CHECK-NEXT:    [[SOMEBITS:%.*]] = and i4 [[X]], [[Y:%.*]]
 ; CHECK-NEXT:    [[SOMEBITS_ARE_NOT_ZERO:%.*]] = icmp ne i4 [[SOMEBITS]], 0
 ; CHECK-NEXT:    ret i1 [[SOMEBITS_ARE_NOT_ZERO]]
 ;
-  %isnotnull = icmp ne i4* %p, null
-  %x = ptrtoint i4* %p to i4
+  %isnotnull = icmp ne ptr %p, null
+  %x = ptrtoint ptr %p to i4
   %somebits = and i4 %x, %y
   %somebits_are_not_zero = icmp ne i4 %somebits, 0
   %r = and i1 %isnotnull, %somebits_are_not_zero
@@ -230,15 +230,15 @@ define i1 @and_cmps_ptr_eq_zero_with_mask_commute2(i4* %p, i4 %y) {
 
 ; and (icmp ne (and ?, (ptrtoint P)), 0), (icmp ne P, 0) --> icmp ne (and ?, (ptrtoint P)), 0
 
-define <3 x i1> @and_cmps_ptr_eq_zero_with_mask_commute3(<3 x i64*> %p, <3 x i64> %y) {
+define <3 x i1> @and_cmps_ptr_eq_zero_with_mask_commute3(<3 x ptr> %p, <3 x i64> %y) {
 ; CHECK-LABEL: @and_cmps_ptr_eq_zero_with_mask_commute3(
-; CHECK-NEXT:    [[X:%.*]] = ptrtoint <3 x i64*> [[P:%.*]] to <3 x i64>
+; CHECK-NEXT:    [[X:%.*]] = ptrtoint <3 x ptr> [[P:%.*]] to <3 x i64>
 ; CHECK-NEXT:    [[SOMEBITS:%.*]] = and <3 x i64> [[Y:%.*]], [[X]]
 ; CHECK-NEXT:    [[SOMEBITS_ARE_NOT_ZERO:%.*]] = icmp ne <3 x i64> [[SOMEBITS]], zeroinitializer
 ; CHECK-NEXT:    ret <3 x i1> [[SOMEBITS_ARE_NOT_ZERO]]
 ;
-  %isnotnull = icmp ne <3 x i64*> %p, zeroinitializer
-  %x = ptrtoint <3 x i64*> %p to <3 x i64>
+  %isnotnull = icmp ne <3 x ptr> %p, zeroinitializer
+  %x = ptrtoint <3 x ptr> %p to <3 x i64>
   %somebits = and <3 x i64> %y, %x
   %somebits_are_not_zero = icmp ne <3 x i64> %somebits, zeroinitializer
   %r = and <3 x i1> %somebits_are_not_zero, %isnotnull
@@ -247,15 +247,15 @@ define <3 x i1> @and_cmps_ptr_eq_zero_with_mask_commute3(<3 x i64*> %p, <3 x i64
 
 ; and (icmp ne P, 0), (icmp ne (and ?, (ptrtoint P)), 0) --> icmp ne (and ?, (ptrtoint P)), 0
 
-define i1 @and_cmps_ptr_eq_zero_with_mask_commute4(i64* %p, i64 %y) {
+define i1 @and_cmps_ptr_eq_zero_with_mask_commute4(ptr %p, i64 %y) {
 ; CHECK-LABEL: @and_cmps_ptr_eq_zero_with_mask_commute4(
-; CHECK-NEXT:    [[X:%.*]] = ptrtoint i64* [[P:%.*]] to i64
+; CHECK-NEXT:    [[X:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; CHECK-NEXT:    [[SOMEBITS:%.*]] = and i64 [[Y:%.*]], [[X]]
 ; CHECK-NEXT:    [[SOMEBITS_ARE_NOT_ZERO:%.*]] = icmp ne i64 [[SOMEBITS]], 0
 ; CHECK-NEXT:    ret i1 [[SOMEBITS_ARE_NOT_ZERO]]
 ;
-  %isnotnull = icmp ne i64* %p, null
-  %x = ptrtoint i64* %p to i64
+  %isnotnull = icmp ne ptr %p, null
+  %x = ptrtoint ptr %p to i64
   %somebits = and i64 %y, %x
   %somebits_are_not_zero = icmp ne i64 %somebits, 0
   %r = and i1 %isnotnull, %somebits_are_not_zero

diff  --git a/llvm/test/Transforms/InstSimplify/assume-non-zero.ll b/llvm/test/Transforms/InstSimplify/assume-non-zero.ll
index 7d8d009ffbf4a..9176b8101da65 100644
--- a/llvm/test/Transforms/InstSimplify/assume-non-zero.ll
+++ b/llvm/test/Transforms/InstSimplify/assume-non-zero.ll
@@ -6,29 +6,29 @@ target triple = "x86_64-unknown-linux-gnu"
 
 declare void @llvm.assume(i1) #1
 
-define i1 @nonnull0_true(i8* %x) {
+define i1 @nonnull0_true(ptr %x) {
 ; CHECK-LABEL: @nonnull0_true(
-; CHECK-NEXT:    [[A:%.*]] = icmp ne i8* [[X:%.*]], null
+; CHECK-NEXT:    [[A:%.*]] = icmp ne ptr [[X:%.*]], null
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[A]])
 ; CHECK-NEXT:    ret i1 true
 ;
-  %a = icmp ne i8* %x, null
+  %a = icmp ne ptr %x, null
   call void @llvm.assume(i1 %a)
-  %q = icmp ne i8* %x, null
+  %q = icmp ne ptr %x, null
   ret i1 %q
 }
 
-define i1 @nonnull1_true(i8* %x) {
+define i1 @nonnull1_true(ptr %x) {
 ; CHECK-LABEL: @nonnull1_true(
-; CHECK-NEXT:    [[INTPTR:%.*]] = ptrtoint i8* [[X:%.*]] to i64
+; CHECK-NEXT:    [[INTPTR:%.*]] = ptrtoint ptr [[X:%.*]] to i64
 ; CHECK-NEXT:    [[A:%.*]] = icmp ne i64 [[INTPTR]], 0
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[A]])
 ; CHECK-NEXT:    ret i1 true
 ;
-  %intptr = ptrtoint i8* %x to i64
+  %intptr = ptrtoint ptr %x to i64
   %a = icmp ne i64 %intptr, 0
   call void @llvm.assume(i1 %a)
-  %q = icmp ne i8* %x, null
+  %q = icmp ne ptr %x, null
   ret i1 %q
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/call.ll b/llvm/test/Transforms/InstSimplify/call.ll
index cb59201e37040..243551ec2b9d5 100644
--- a/llvm/test/Transforms/InstSimplify/call.ll
+++ b/llvm/test/Transforms/InstSimplify/call.ll
@@ -360,93 +360,93 @@ define float @test_idempotence(float %a) {
   ret float %r5
 }
 
-define i8* @operator_new() {
+define ptr @operator_new() {
 ; CHECK-LABEL: @operator_new(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias i8* @_Znwm(i64 8)
+; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias ptr @_Znwm(i64 8)
 ; CHECK-NEXT:    br i1 false, label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
 ; CHECK:       cast.notnull:
-; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 4
+; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4
 ; CHECK-NEXT:    br label [[CAST_END]]
 ; CHECK:       cast.end:
-; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi i8* [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    ret i8* [[CAST_RESULT]]
+; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    ret ptr [[CAST_RESULT]]
 ;
 entry:
-  %call = tail call noalias i8* @_Znwm(i64 8)
-  %cmp = icmp eq i8* %call, null
+  %call = tail call noalias ptr @_Znwm(i64 8)
+  %cmp = icmp eq ptr %call, null
   br i1 %cmp, label %cast.end, label %cast.notnull
 
 cast.notnull:                                     ; preds = %entry
-  %add.ptr = getelementptr inbounds i8, i8* %call, i64 4
+  %add.ptr = getelementptr inbounds i8, ptr %call, i64 4
   br label %cast.end
 
 cast.end:                                         ; preds = %cast.notnull, %entry
-  %cast.result = phi i8* [ %add.ptr, %cast.notnull ], [ null, %entry ]
-  ret i8* %cast.result
+  %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ]
+  ret ptr %cast.result
 
 }
 
-declare nonnull noalias i8* @_Znwm(i64)
+declare nonnull noalias ptr @_Znwm(i64)
 
 %"struct.std::nothrow_t" = type { i8 }
 @_ZSt7nothrow = external global %"struct.std::nothrow_t"
 
-define i8* @operator_new_nothrow_t() {
+define ptr @operator_new_nothrow_t() {
 ; CHECK-LABEL: @operator_new_nothrow_t(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias i8* @_ZnamRKSt9nothrow_t(i64 8, %"struct.std::nothrow_t"* @_ZSt7nothrow)
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[CALL]], null
+; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias ptr @_ZnamRKSt9nothrow_t(i64 8, ptr @_ZSt7nothrow)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[CALL]], null
 ; CHECK-NEXT:    br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
 ; CHECK:       cast.notnull:
-; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 4
+; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4
 ; CHECK-NEXT:    br label [[CAST_END]]
 ; CHECK:       cast.end:
-; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi i8* [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    ret i8* [[CAST_RESULT]]
+; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    ret ptr [[CAST_RESULT]]
 ;
 entry:
-  %call = tail call noalias i8* @_ZnamRKSt9nothrow_t(i64 8, %"struct.std::nothrow_t"* @_ZSt7nothrow)
-  %cmp = icmp eq i8* %call, null
+  %call = tail call noalias ptr @_ZnamRKSt9nothrow_t(i64 8, ptr @_ZSt7nothrow)
+  %cmp = icmp eq ptr %call, null
   br i1 %cmp, label %cast.end, label %cast.notnull
 
 cast.notnull:                                     ; preds = %entry
-  %add.ptr = getelementptr inbounds i8, i8* %call, i64 4
+  %add.ptr = getelementptr inbounds i8, ptr %call, i64 4
   br label %cast.end
 
 cast.end:                                         ; preds = %cast.notnull, %entry
-  %cast.result = phi i8* [ %add.ptr, %cast.notnull ], [ null, %entry ]
-  ret i8* %cast.result
+  %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ]
+  ret ptr %cast.result
 
 }
 
-declare i8* @_ZnamRKSt9nothrow_t(i64, %"struct.std::nothrow_t"*) nounwind
+declare ptr @_ZnamRKSt9nothrow_t(i64, ptr) nounwind
 
-define i8* @malloc_can_return_null() {
+define ptr @malloc_can_return_null() {
 ; CHECK-LABEL: @malloc_can_return_null(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias i8* @malloc(i64 8)
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[CALL]], null
+; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias ptr @malloc(i64 8)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[CALL]], null
 ; CHECK-NEXT:    br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
 ; CHECK:       cast.notnull:
-; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 4
+; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4
 ; CHECK-NEXT:    br label [[CAST_END]]
 ; CHECK:       cast.end:
-; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi i8* [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    ret i8* [[CAST_RESULT]]
+; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    ret ptr [[CAST_RESULT]]
 ;
 entry:
-  %call = tail call noalias i8* @malloc(i64 8)
-  %cmp = icmp eq i8* %call, null
+  %call = tail call noalias ptr @malloc(i64 8)
+  %cmp = icmp eq ptr %call, null
   br i1 %cmp, label %cast.end, label %cast.notnull
 
 cast.notnull:                                     ; preds = %entry
-  %add.ptr = getelementptr inbounds i8, i8* %call, i64 4
+  %add.ptr = getelementptr inbounds i8, ptr %call, i64 4
   br label %cast.end
 
 cast.end:                                         ; preds = %cast.notnull, %entry
-  %cast.result = phi i8* [ %add.ptr, %cast.notnull ], [ null, %entry ]
-  ret i8* %cast.result
+  %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ]
+  ret ptr %cast.result
 
 }
 
@@ -478,21 +478,21 @@ define <8 x i32> @partial_masked_load() {
 ; CHECK-LABEL: @partial_masked_load(
 ; CHECK-NEXT:    ret <8 x i32> <i32 undef, i32 undef, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
 ;
-  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* bitcast (i32* getelementptr ([8 x i32], [8 x i32]* @GV, i64 0, i64 -2) to <8 x i32>*), i32 4, <8 x i1> <i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
+  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr getelementptr ([8 x i32], ptr @GV, i64 0, i64 -2), i32 4, <8 x i1> <i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
   ret <8 x i32> %masked.load
 }
 
-define <8 x i32> @masked_load_undef_mask(<8 x i32>* %V) {
+define <8 x i32> @masked_load_undef_mask(ptr %V) {
 ; CHECK-LABEL: @masked_load_undef_mask(
 ; CHECK-NEXT:    ret <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>
 ;
-  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %V, i32 4, <8 x i1> undef, <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>)
+  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr %V, i32 4, <8 x i1> undef, <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>)
   ret <8 x i32> %masked.load
 }
 
-declare noalias i8* @malloc(i64)
+declare noalias ptr @malloc(i64)
 
-declare <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>)
+declare <8 x i32> @llvm.masked.load.v8i32.p0(ptr, i32, <8 x i1>, <8 x i32>)
 
 declare double @llvm.powi.f64.i16(double, i16)
 declare <2 x double> @llvm.powi.v2f64.i16(<2 x double>, i16)
@@ -1311,7 +1311,7 @@ define <2 x double> @negated_mag_arg_vec(<2 x double> %x) {
 ; for call graph passes.
 
 declare i32 @passthru_i32(i32 returned)
-declare i8* @passthru_p8(i8* returned)
+declare ptr @passthru_p8(ptr returned)
 
 define i32 @returned_const_int_arg() {
 ; CHECK-LABEL: @returned_const_int_arg(
@@ -1322,13 +1322,13 @@ define i32 @returned_const_int_arg() {
   ret i32 %x
 }
 
-define i8* @returned_const_ptr_arg() {
+define ptr @returned_const_ptr_arg() {
 ; CHECK-LABEL: @returned_const_ptr_arg(
-; CHECK-NEXT:    [[X:%.*]] = call i8* @passthru_p8(i8* null)
-; CHECK-NEXT:    ret i8* [[X]]
+; CHECK-NEXT:    [[X:%.*]] = call ptr @passthru_p8(ptr null)
+; CHECK-NEXT:    ret ptr [[X]]
 ;
-  %x = call i8* @passthru_p8(i8* null)
-  ret i8* %x
+  %x = call ptr @passthru_p8(ptr null)
+  ret ptr %x
 }
 
 define i32 @returned_var_arg(i32 %arg) {
@@ -1546,18 +1546,18 @@ define <3 x i33> @ctlz_ashr_sign_bit_vec(<3 x i33> %x) {
   ret <3 x i33> %r
 }
 
-declare i8* @llvm.ptrmask.p0i8.i64(i8* , i64)
+declare ptr @llvm.ptrmask.p0.i64(ptr , i64)
 
 define i1 @capture_vs_recurse(i64 %mask) {
 ; CHECK-LABEL: @capture_vs_recurse(
-; CHECK-NEXT:    [[A:%.*]] = call noalias i8* @malloc(i64 8)
-; CHECK-NEXT:    [[B:%.*]] = call nonnull i8* @llvm.ptrmask.p0i8.i64(i8* [[A]], i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[A]], [[B]]
+; CHECK-NEXT:    [[A:%.*]] = call noalias ptr @malloc(i64 8)
+; CHECK-NEXT:    [[B:%.*]] = call nonnull ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[A]], [[B]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %a = call noalias i8* @malloc(i64 8)
-  %b = call nonnull i8* @llvm.ptrmask.p0i8.i64(i8* %a, i64 %mask)
-  %cmp = icmp eq i8* %a, %b
+  %a = call noalias ptr @malloc(i64 8)
+  %b = call nonnull ptr @llvm.ptrmask.p0.i64(ptr %a, i64 %mask)
+  %cmp = icmp eq ptr %a, %b
   ret i1 %cmp
 }
 

diff --git a/llvm/test/Transforms/InstSimplify/cast.ll b/llvm/test/Transforms/InstSimplify/cast.ll
index 3e37c7f9db17c..8178f05be5cb9 100644
--- a/llvm/test/Transforms/InstSimplify/cast.ll
+++ b/llvm/test/Transforms/InstSimplify/cast.ll
@@ -13,25 +13,22 @@ entry:
   ret i1 %T
 }
 
-define i8* @test2(i8* %V) {
+define ptr @test2(ptr %V) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    ret i8* [[V:%.*]]
+; CHECK-NEXT:    ret ptr [[V:%.*]]
 ;
 entry:
-  %BC1 = bitcast i8* %V to i32*
-  %BC2 = bitcast i32* %BC1 to i8*
-  ret i8* %BC2
+  ret ptr %V
 }
 
-define i8* @test3(i8* %V) {
+define ptr @test3(ptr %V) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    ret i8* [[V:%.*]]
+; CHECK-NEXT:    ret ptr [[V:%.*]]
 ;
 entry:
-  %BC = bitcast i8* %V to i8*
-  ret i8* %BC
+  ret ptr %V
 }
 
 define i32 @test4() {
@@ -39,12 +36,11 @@ define i32 @test4() {
 ; CHECK-NEXT:    ret i32 4
 ;
   %alloca = alloca i32, align 4                                     ; alloca + 0
-  %gep = getelementptr inbounds i32, i32* %alloca, i32 1            ; alloca + 4
-  %bc = bitcast i32* %gep to [4 x i8]*                              ; alloca + 4
-  %pti = ptrtoint i32* %alloca to i32                               ; alloca
+  %gep = getelementptr inbounds i32, ptr %alloca, i32 1            ; alloca + 4
+  %pti = ptrtoint ptr %alloca to i32                               ; alloca
   %sub = sub i32 0, %pti                                            ; -alloca
-  %add = getelementptr [4 x i8], [4 x i8]* %bc, i32 0, i32 %sub     ; alloca + 4 - alloca == 4
-  %add_to_int = ptrtoint i8* %add to i32                            ; 4
+  %add = getelementptr [4 x i8], ptr %gep, i32 0, i32 %sub     ; alloca + 4 - alloca == 4
+  %add_to_int = ptrtoint ptr %add to i32                            ; 4
   ret i32 %add_to_int                                               ; 4
 }
 
@@ -53,11 +49,10 @@ define i32 @test5() {
 ; CHECK-NEXT:    ret i32 3
 ;
   %alloca = alloca i32, align 4                                     ; alloca + 0
-  %gep = getelementptr inbounds i32, i32* %alloca, i32 1            ; alloca + 4
-  %bc = bitcast i32* %gep to [4 x i8]*                              ; alloca + 4
-  %pti = ptrtoint i32* %alloca to i32                               ; alloca
+  %gep = getelementptr inbounds i32, ptr %alloca, i32 1            ; alloca + 4
+  %pti = ptrtoint ptr %alloca to i32                               ; alloca
   %sub = xor i32 %pti, -1                                           ; ~alloca
-  %add = getelementptr [4 x i8], [4 x i8]* %bc, i32 0, i32 %sub     ; alloca + 4 - alloca - 1 == 3
-  %add_to_int = ptrtoint i8* %add to i32                            ; 4
+  %add = getelementptr [4 x i8], ptr %gep, i32 0, i32 %sub     ; alloca + 4 - alloca - 1 == 3
+  %add_to_int = ptrtoint ptr %add to i32                            ; 4
   ret i32 %add_to_int                                               ; 4
 }

diff --git a/llvm/test/Transforms/InstSimplify/cmp-alloca-offsets.ll b/llvm/test/Transforms/InstSimplify/cmp-alloca-offsets.ll
index b59fee40d3e13..4766a39081da9 100644
--- a/llvm/test/Transforms/InstSimplify/cmp-alloca-offsets.ll
+++ b/llvm/test/Transforms/InstSimplify/cmp-alloca-offsets.ll
@@ -13,15 +13,14 @@ define i1 @adjacent_alloca() {
 ; CHECK-LABEL: @adjacent_alloca(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i8, i32 4, align 1
 ; CHECK-NEXT:    [[B:%.*]] = alloca i8, i32 4, align 1
-; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, i8* [[B]], i64 4
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i8* [[A]], [[B_OFF]]
+; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, ptr [[B]], i64 4
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A]], [[B_OFF]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %a = alloca i8, i32 4
   %b = alloca i8, i32 4
-  %a.off = getelementptr i8, i8* %a, i64 0
-  %b.off = getelementptr i8, i8* %b, i64 4
-  %res = icmp ne i8* %a.off, %b.off
+  %b.off = getelementptr i8, ptr %b, i64 4
+  %res = icmp ne ptr %a, %b.off
   ret i1 %res
 }
 
@@ -30,15 +29,14 @@ define i1 @adjacent_alloca2() {
 ; CHECK-LABEL: @adjacent_alloca2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i8, i32 4, align 1
 ; CHECK-NEXT:    [[B:%.*]] = alloca i8, i32 4, align 1
-; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, i8* [[A]], i64 4
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i8* [[A_OFF]], [[B]]
+; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, ptr [[A]], i64 4
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A_OFF]], [[B]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %a = alloca i8, i32 4
   %b = alloca i8, i32 4
-  %a.off = getelementptr i8, i8* %a, i64 4
-  %b.off = getelementptr i8, i8* %b, i64 0
-  %res = icmp ne i8* %a.off, %b.off
+  %a.off = getelementptr i8, ptr %a, i64 4
+  %res = icmp ne ptr %a.off, %b
   ret i1 %res
 }
 
@@ -47,16 +45,16 @@ define i1 @positive_non_equal_end() {
 ; CHECK-LABEL: @positive_non_equal_end(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i8, i32 4, align 1
 ; CHECK-NEXT:    [[B:%.*]] = alloca i8, i32 4, align 1
-; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, i8* [[A]], i64 4
-; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, i8* [[B]], i64 4
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i8* [[A_OFF]], [[B_OFF]]
+; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, ptr [[A]], i64 4
+; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, ptr [[B]], i64 4
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A_OFF]], [[B_OFF]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %a = alloca i8, i32 4
   %b = alloca i8, i32 4
-  %a.off = getelementptr i8, i8* %a, i64 4
-  %b.off = getelementptr i8, i8* %b, i64 4
-  %res = icmp ne i8* %a.off, %b.off
+  %a.off = getelementptr i8, ptr %a, i64 4
+  %b.off = getelementptr i8, ptr %b, i64 4
+  %res = icmp ne ptr %a.off, %b.off
   ret i1 %res
 }
 
@@ -65,16 +63,16 @@ define i1 @positive_equal_past_end() {
 ; CHECK-LABEL: @positive_equal_past_end(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i8, i32 4, align 1
 ; CHECK-NEXT:    [[B:%.*]] = alloca i8, i32 4, align 1
-; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, i8* [[A]], i64 8
-; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, i8* [[B]], i64 12
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i8* [[A_OFF]], [[B_OFF]]
+; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, ptr [[A]], i64 8
+; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, ptr [[B]], i64 12
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A_OFF]], [[B_OFF]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %a = alloca i8, i32 4
   %b = alloca i8, i32 4
-  %a.off = getelementptr i8, i8* %a, i64 8
-  %b.off = getelementptr i8, i8* %b, i64 12
-  %res = icmp ne i8* %a.off, %b.off
+  %a.off = getelementptr i8, ptr %a, i64 8
+  %b.off = getelementptr i8, ptr %b, i64 12
+  %res = icmp ne ptr %a.off, %b.off
   ret i1 %res
 }
 
@@ -84,9 +82,9 @@ define i1 @positive_non_equal() {
 ;
   %a = alloca i8, i32 4
   %b = alloca i8, i32 4
-  %a.off = getelementptr i8, i8* %a, i64 3
-  %b.off = getelementptr i8, i8* %b, i64 3
-  %res = icmp ne i8* %a.off, %b.off
+  %a.off = getelementptr i8, ptr %a, i64 3
+  %b.off = getelementptr i8, ptr %b, i64 3
+  %res = icmp ne ptr %a.off, %b.off
   ret i1 %res
 }
 
@@ -95,16 +93,16 @@ define i1 @one_neg_equal1() {
 ; CHECK-LABEL: @one_neg_equal1(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i8, i32 4, align 1
 ; CHECK-NEXT:    [[B:%.*]] = alloca i8, i32 4, align 1
-; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, i8* [[A]], i64 -1
-; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, i8* [[B]], i64 3
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i8* [[A_OFF]], [[B_OFF]]
+; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, ptr [[A]], i64 -1
+; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, ptr [[B]], i64 3
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A_OFF]], [[B_OFF]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %a = alloca i8, i32 4
   %b = alloca i8, i32 4
-  %a.off = getelementptr i8, i8* %a, i64 -1
-  %b.off = getelementptr i8, i8* %b, i64 3
-  %res = icmp ne i8* %a.off, %b.off
+  %a.off = getelementptr i8, ptr %a, i64 -1
+  %b.off = getelementptr i8, ptr %b, i64 3
+  %res = icmp ne ptr %a.off, %b.off
   ret i1 %res
 }
 
@@ -113,16 +111,16 @@ define i1 @one_neg_equal2() {
 ; CHECK-LABEL: @one_neg_equal2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i8, i32 4, align 1
 ; CHECK-NEXT:    [[B:%.*]] = alloca i8, i32 4, align 1
-; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, i8* [[A]], i64 3
-; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, i8* [[B]], i64 -1
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i8* [[A_OFF]], [[B_OFF]]
+; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, ptr [[A]], i64 3
+; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, ptr [[B]], i64 -1
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A_OFF]], [[B_OFF]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %a = alloca i8, i32 4
   %b = alloca i8, i32 4
-  %a.off = getelementptr i8, i8* %a, i64 3
-  %b.off = getelementptr i8, i8* %b, i64 -1
-  %res = icmp ne i8* %a.off, %b.off
+  %a.off = getelementptr i8, ptr %a, i64 3
+  %b.off = getelementptr i8, ptr %b, i64 -1
+  %res = icmp ne ptr %a.off, %b.off
   ret i1 %res
 }
 
@@ -131,16 +129,16 @@ define i1 @both_neg_equal() {
 ; CHECK-LABEL: @both_neg_equal(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i8, i32 4, align 1
 ; CHECK-NEXT:    [[B:%.*]] = alloca i8, i32 4, align 1
-; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, i8* [[A]], i64 -4
-; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, i8* [[B]], i64 -8
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i8* [[A_OFF]], [[B_OFF]]
+; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, ptr [[A]], i64 -4
+; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, ptr [[B]], i64 -8
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A_OFF]], [[B_OFF]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %a = alloca i8, i32 4
   %b = alloca i8, i32 4
-  %a.off = getelementptr i8, i8* %a, i64 -4
-  %b.off = getelementptr i8, i8* %b, i64 -8
-  %res = icmp ne i8* %a.off, %b.off
+  %a.off = getelementptr i8, ptr %a, i64 -4
+  %b.off = getelementptr i8, ptr %b, i64 -8
+  %res = icmp ne ptr %a.off, %b.off
   ret i1 %res
 }
 
@@ -149,16 +147,16 @@ define i1 @mixed_offsets1() {
 ; CHECK-LABEL: @mixed_offsets1(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i8, i32 4, align 1
 ; CHECK-NEXT:    [[B:%.*]] = alloca i8, i32 4, align 1
-; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, i8* [[A]], i64 -1
-; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, i8* [[B]], i64 2
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i8* [[A_OFF]], [[B_OFF]]
+; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, ptr [[A]], i64 -1
+; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, ptr [[B]], i64 2
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A_OFF]], [[B_OFF]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %a = alloca i8, i32 4
   %b = alloca i8, i32 4
-  %a.off = getelementptr i8, i8* %a, i64 -1
-  %b.off = getelementptr i8, i8* %b, i64 2
-  %res = icmp ne i8* %a.off, %b.off
+  %a.off = getelementptr i8, ptr %a, i64 -1
+  %b.off = getelementptr i8, ptr %b, i64 2
+  %res = icmp ne ptr %a.off, %b.off
   ret i1 %res
 }
 
@@ -167,16 +165,16 @@ define i1 @mixed_offsets2() {
 ; CHECK-LABEL: @mixed_offsets2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i8, i32 4, align 1
 ; CHECK-NEXT:    [[B:%.*]] = alloca i8, i32 4, align 1
-; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, i8* [[A]], i64 1
-; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, i8* [[B]], i64 -2
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i8* [[A_OFF]], [[B_OFF]]
+; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, ptr [[A]], i64 1
+; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, ptr [[B]], i64 -2
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A_OFF]], [[B_OFF]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %a = alloca i8, i32 4
   %b = alloca i8, i32 4
-  %a.off = getelementptr i8, i8* %a, i64 1
-  %b.off = getelementptr i8, i8* %b, i64 -2
-  %res = icmp ne i8* %a.off, %b.off
+  %a.off = getelementptr i8, ptr %a, i64 1
+  %b.off = getelementptr i8, ptr %b, i64 -2
+  %res = icmp ne ptr %a.off, %b.off
   ret i1 %res
 }
 
@@ -185,16 +183,16 @@ define i1 @negative_in_other() {
 ; CHECK-LABEL: @negative_in_other(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i8, i32 4, align 1
 ; CHECK-NEXT:    [[B:%.*]] = alloca i8, i32 4, align 1
-; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, i8* [[A]], i64 -3
-; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, i8* [[B]], i64 -2
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i8* [[A_OFF]], [[B_OFF]]
+; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i8, ptr [[A]], i64 -3
+; CHECK-NEXT:    [[B_OFF:%.*]] = getelementptr i8, ptr [[B]], i64 -2
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A_OFF]], [[B_OFF]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %a = alloca i8, i32 4
   %b = alloca i8, i32 4
-  %a.off = getelementptr i8, i8* %a, i64 -3
-  %b.off = getelementptr i8, i8* %b, i64 -2
-  %res = icmp ne i8* %a.off, %b.off
+  %a.off = getelementptr i8, ptr %a, i64 -3
+  %b.off = getelementptr i8, ptr %b, i64 -2
+  %res = icmp ne ptr %a.off, %b.off
   ret i1 %res
 }
 

diff --git a/llvm/test/Transforms/InstSimplify/compare.ll b/llvm/test/Transforms/InstSimplify/compare.ll
index efe1d6e225012..7149d0a725948 100644
--- a/llvm/test/Transforms/InstSimplify/compare.ll
+++ b/llvm/test/Transforms/InstSimplify/compare.ll
@@ -9,7 +9,7 @@ define i1 @ptrtoint() {
 ; CHECK-NEXT:    ret i1 false
 ;
   %a = alloca i8
-  %tmp = ptrtoint i8* %a to i32
+  %tmp = ptrtoint ptr %a to i32
   %r = icmp eq i32 %tmp, 0
   ret i1 %r
 }
@@ -20,9 +20,7 @@ define i1 @bitcast() {
 ;
   %a = alloca i32
   %b = alloca i64
-  %x = bitcast i32* %a to i8*
-  %y = bitcast i64* %b to i8*
-  %cmp = icmp eq i8* %x, %y
+  %cmp = icmp eq ptr %a, %b
   ret i1 %cmp
 }
 
@@ -31,8 +29,7 @@ define i1 @gep() {
 ; CHECK-NEXT:    ret i1 false
 ;
   %a = alloca [3 x i8], align 8
-  %x = getelementptr inbounds [3 x i8], [3 x i8]* %a, i32 0, i32 0
-  %cmp = icmp eq i8* %x, null
+  %cmp = icmp eq ptr %a, null
   ret i1 %cmp
 }
 
@@ -41,9 +38,7 @@ define i1 @gep2() {
 ; CHECK-NEXT:    ret i1 true
 ;
   %a = alloca [3 x i8], align 8
-  %x = getelementptr inbounds [3 x i8], [3 x i8]* %a, i32 0, i32 0
-  %y = getelementptr inbounds [3 x i8], [3 x i8]* %a, i32 0, i32 0
-  %cmp = icmp eq i8* %x, %y
+  %cmp = icmp eq ptr %a, %a
   ret i1 %cmp
 }
 
@@ -57,9 +52,8 @@ define i1 @gep3() {
 ; CHECK-NEXT:    ret i1 false
 ;
   %x = alloca %gept, align 8
-  %a = getelementptr %gept, %gept* %x, i64 0, i32 0
-  %b = getelementptr %gept, %gept* %x, i64 0, i32 1
-  %equal = icmp eq i32* %a, %b
+  %b = getelementptr %gept, ptr %x, i64 0, i32 1
+  %equal = icmp eq ptr %x, %b
   ret i1 %equal
 }
 
@@ -68,9 +62,8 @@ define i1 @gep4() {
 ; CHECK-NEXT:    ret i1 false
 ;
   %x = alloca %gept, align 8
-  %a = getelementptr %gept, %gept* @gepy, i64 0, i32 0
-  %b = getelementptr %gept, %gept* @gepy, i64 0, i32 1
-  %equal = icmp eq i32* %a, %b
+  %b = getelementptr %gept, ptr @gepy, i64 0, i32 1
+  %equal = icmp eq ptr @gepy, %b
   ret i1 %equal
 }
 
@@ -78,10 +71,10 @@ define i1 @gep4() {
 
 define i1 @PR31262() {
 ; CHECK-LABEL: @PR31262(
-; CHECK-NEXT:    ret i1 icmp uge (i32* getelementptr ([1 x i32], [1 x i32]* @a, i32 0, i32 undef), i32* getelementptr inbounds ([1 x i32], [1 x i32]* @a, i32 0, i32 0))
+; CHECK-NEXT:    ret i1 true
 ;
-  %idx = getelementptr inbounds [1 x i32], [1 x i32]* @a, i64 0, i64 undef
-  %cmp = icmp uge i32* %idx, getelementptr inbounds ([1 x i32], [1 x i32]* @a, i32 0, i32 0)
+  %idx = getelementptr inbounds [1 x i32], ptr @a, i64 0, i64 undef
+  %cmp = icmp uge ptr %idx, @a
   ret i1 %cmp
 }
 
@@ -90,195 +83,189 @@ define i1 @gep5() {
 ; CHECK-NEXT:    ret i1 false
 ;
   %x = alloca %gept, align 8
-  %a = getelementptr inbounds %gept, %gept* %x, i64 0, i32 1
-  %b = getelementptr %gept, %gept* @gepy, i64 0, i32 0
-  %equal = icmp eq i32* %a, %b
+  %a = getelementptr inbounds %gept, ptr %x, i64 0, i32 1
+  %equal = icmp eq ptr %a, @gepy
   ret i1 %equal
 }
 
-define i1 @gep6(%gept* %x) {
+define i1 @gep6(ptr %x) {
 ; Same as @gep3 but potentially null.
 ; CHECK-LABEL: @gep6(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %a = getelementptr %gept, %gept* %x, i64 0, i32 0
-  %b = getelementptr %gept, %gept* %x, i64 0, i32 1
-  %equal = icmp eq i32* %a, %b
+  %b = getelementptr %gept, ptr %x, i64 0, i32 1
+  %equal = icmp eq ptr %x, %b
   ret i1 %equal
 }
 
-define i1 @gep7(%gept* %x) {
+define i1 @gep7(ptr %x) {
 ; CHECK-LABEL: @gep7(
-; CHECK-NEXT:    [[A:%.*]] = getelementptr [[GEPT:%.*]], %gept* [[X:%.*]], i64 0, i32 0
-; CHECK-NEXT:    [[EQUAL:%.*]] = icmp eq i32* [[A]], getelementptr ([[GEPT]], %gept* @gepz, i32 0, i32 0)
+; CHECK-NEXT:    [[EQUAL:%.*]] = icmp eq ptr [[X:%.*]], @gepz
 ; CHECK-NEXT:    ret i1 [[EQUAL]]
 ;
-  %a = getelementptr %gept, %gept* %x, i64 0, i32 0
-  %b = getelementptr %gept, %gept* @gepz, i64 0, i32 0
-  %equal = icmp eq i32* %a, %b
+  %equal = icmp eq ptr %x, @gepz
   ret i1 %equal
 }
 
-define i1 @gep8(%gept* %x) {
+define i1 @gep8(ptr %x) {
 ; CHECK-LABEL: @gep8(
-; CHECK-NEXT:    [[A:%.*]] = getelementptr [[GEPT:%.*]], %gept* [[X:%.*]], i32 1
-; CHECK-NEXT:    [[B:%.*]] = getelementptr [[GEPT]], %gept* [[X]], i32 -1
-; CHECK-NEXT:    [[EQUAL:%.*]] = icmp ugt %gept* [[A]], [[B]]
+; CHECK-NEXT:    [[A:%.*]] = getelementptr [[GEPT:%.*]], ptr [[X:%.*]], i32 1
+; CHECK-NEXT:    [[B:%.*]] = getelementptr [[GEPT]], ptr [[X]], i32 -1
+; CHECK-NEXT:    [[EQUAL:%.*]] = icmp ugt ptr [[A]], [[B]]
 ; CHECK-NEXT:    ret i1 [[EQUAL]]
 ;
-  %a = getelementptr %gept, %gept* %x, i32 1
-  %b = getelementptr %gept, %gept* %x, i32 -1
-  %equal = icmp ugt %gept* %a, %b
+  %a = getelementptr %gept, ptr %x, i32 1
+  %b = getelementptr %gept, ptr %x, i32 -1
+  %equal = icmp ugt ptr %a, %b
   ret i1 %equal
 }
 
-define i1 @gep9(i8* %ptr) {
+define i1 @gep9(ptr %ptr) {
 ; CHECK-LABEL: @gep9(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    ret i1 true
 ;
 entry:
-  %first1 = getelementptr inbounds i8, i8* %ptr, i32 0
-  %first2 = getelementptr inbounds i8, i8* %first1, i32 1
-  %first3 = getelementptr inbounds i8, i8* %first2, i32 2
-  %first4 = getelementptr inbounds i8, i8* %first3, i32 4
-  %last1 = getelementptr inbounds i8, i8* %first2, i32 48
-  %last2 = getelementptr inbounds i8, i8* %last1, i32 8
-  %last3 = getelementptr inbounds i8, i8* %last2, i32 -4
-  %last4 = getelementptr inbounds i8, i8* %last3, i32 -4
-  %first.int = ptrtoint i8* %first4 to i32
-  %last.int = ptrtoint i8* %last4 to i32
+  %first2 = getelementptr inbounds i8, ptr %ptr, i32 1
+  %first3 = getelementptr inbounds i8, ptr %first2, i32 2
+  %first4 = getelementptr inbounds i8, ptr %first3, i32 4
+  %last1 = getelementptr inbounds i8, ptr %first2, i32 48
+  %last2 = getelementptr inbounds i8, ptr %last1, i32 8
+  %last3 = getelementptr inbounds i8, ptr %last2, i32 -4
+  %last4 = getelementptr inbounds i8, ptr %last3, i32 -4
+  %first.int = ptrtoint ptr %first4 to i32
+  %last.int = ptrtoint ptr %last4 to i32
   %cmp = icmp ne i32 %last.int, %first.int
   ret i1 %cmp
 }
 
-define i1 @gep10(i8* %ptr) {
+define i1 @gep10(ptr %ptr) {
 ; CHECK-LABEL: @gep10(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    ret i1 true
 ;
 entry:
-  %first1 = getelementptr inbounds i8, i8* %ptr, i32 -2
-  %first2 = getelementptr inbounds i8, i8* %first1, i32 44
-  %last1 = getelementptr inbounds i8, i8* %ptr, i32 48
-  %last2 = getelementptr inbounds i8, i8* %last1, i32 -6
-  %first.int = ptrtoint i8* %first2 to i32
-  %last.int = ptrtoint i8* %last2 to i32
+  %first1 = getelementptr inbounds i8, ptr %ptr, i32 -2
+  %first2 = getelementptr inbounds i8, ptr %first1, i32 44
+  %last1 = getelementptr inbounds i8, ptr %ptr, i32 48
+  %last2 = getelementptr inbounds i8, ptr %last1, i32 -6
+  %first.int = ptrtoint ptr %first2 to i32
+  %last.int = ptrtoint ptr %last2 to i32
   %cmp = icmp eq i32 %last.int, %first.int
   ret i1 %cmp
 }
 
-define i1 @gep11(i8* %ptr) {
+define i1 @gep11(ptr %ptr) {
 ; CHECK-LABEL: @gep11(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    ret i1 true
 ;
 entry:
-  %first1 = getelementptr inbounds i8, i8* %ptr, i32 -2
-  %last1 = getelementptr inbounds i8, i8* %ptr, i32 48
-  %last2 = getelementptr inbounds i8, i8* %last1, i32 -6
-  %cmp = icmp ult i8* %first1, %last2
+  %first1 = getelementptr inbounds i8, ptr %ptr, i32 -2
+  %last1 = getelementptr inbounds i8, ptr %ptr, i32 48
+  %last2 = getelementptr inbounds i8, ptr %last1, i32 -6
+  %cmp = icmp ult ptr %first1, %last2
   ret i1 %cmp
 }
 
-define i1 @gep12(i8* %ptr) {
+define i1 @gep12(ptr %ptr) {
 ; CHECK-LABEL: @gep12(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[FIRST1:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i32 -2
-; CHECK-NEXT:    [[LAST1:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i32 48
-; CHECK-NEXT:    [[LAST2:%.*]] = getelementptr inbounds i8, i8* [[LAST1]], i32 -6
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8* [[FIRST1]], [[LAST2]]
+; CHECK-NEXT:    [[FIRST1:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i32 -2
+; CHECK-NEXT:    [[LAST1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 48
+; CHECK-NEXT:    [[LAST2:%.*]] = getelementptr inbounds i8, ptr [[LAST1]], i32 -6
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt ptr [[FIRST1]], [[LAST2]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
 entry:
-  %first1 = getelementptr inbounds i8, i8* %ptr, i32 -2
-  %last1 = getelementptr inbounds i8, i8* %ptr, i32 48
-  %last2 = getelementptr inbounds i8, i8* %last1, i32 -6
-  %cmp = icmp slt i8* %first1, %last2
+  %first1 = getelementptr inbounds i8, ptr %ptr, i32 -2
+  %last1 = getelementptr inbounds i8, ptr %ptr, i32 48
+  %last2 = getelementptr inbounds i8, ptr %last1, i32 -6
+  %cmp = icmp slt ptr %first1, %last2
   ret i1 %cmp
 }
 
-define i1 @gep13(i8* %ptr) {
+define i1 @gep13(ptr %ptr) {
 ; CHECK-LABEL: @gep13(
 ; CHECK-NEXT:    ret i1 false
 ;
 ; We can prove this GEP is non-null because it is inbounds.
-  %x = getelementptr inbounds i8, i8* %ptr, i32 1
-  %cmp = icmp eq i8* %x, null
+  %x = getelementptr inbounds i8, ptr %ptr, i32 1
+  %cmp = icmp eq ptr %x, null
   ret i1 %cmp
 }
 
-define i1 @gep13_no_null_opt(i8* %ptr) #0 {
+define i1 @gep13_no_null_opt(ptr %ptr) #0 {
 ; We can't prove this GEP is non-null.
 ; CHECK-LABEL: @gep13_no_null_opt(
-; CHECK-NEXT:    [[X:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i32 1
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[X]], null
+; CHECK-NEXT:    [[X:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i32 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[X]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %x = getelementptr inbounds i8, i8* %ptr, i32 1
-  %cmp = icmp eq i8* %x, null
+  %x = getelementptr inbounds i8, ptr %ptr, i32 1
+  %cmp = icmp eq ptr %x, null
   ret i1 %cmp
 }
 
-define i1 @gep14({ {}, i8 }* %ptr) {
+define i1 @gep14(ptr %ptr) {
 ; CHECK-LABEL: @gep14(
-; CHECK-NEXT:    [[X:%.*]] = getelementptr inbounds { {}, i8 }, { {}, i8 }* [[PTR:%.*]], i32 0, i32 1
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[X]], null
+; CHECK-NEXT:    [[X:%.*]] = getelementptr inbounds { {}, i8 }, ptr [[PTR:%.*]], i32 0, i32 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[X]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
 ; We can't simplify this because the offset of one in the GEP actually doesn't
 ; move the pointer.
-  %x = getelementptr inbounds { {}, i8 }, { {}, i8 }* %ptr, i32 0, i32 1
-  %cmp = icmp eq i8* %x, null
+  %x = getelementptr inbounds { {}, i8 }, ptr %ptr, i32 0, i32 1
+  %cmp = icmp eq ptr %x, null
   ret i1 %cmp
 }
 
-define i1 @gep15({ {}, [4 x {i8, i8}]}* %ptr, i32 %y) {
+define i1 @gep15(ptr %ptr, i32 %y) {
 ; CHECK-LABEL: @gep15(
 ; CHECK-NEXT:    ret i1 false
 ;
 ; We can prove this GEP is non-null even though there is a user value, as we
 ; would necessarily violate inbounds on one side or the other.
-  %x = getelementptr inbounds { {}, [4 x {i8, i8}]}, { {}, [4 x {i8, i8}]}* %ptr, i32 0, i32 1, i32 %y, i32 1
-  %cmp = icmp eq i8* %x, null
+  %x = getelementptr inbounds { {}, [4 x {i8, i8}]}, ptr %ptr, i32 0, i32 1, i32 %y, i32 1
+  %cmp = icmp eq ptr %x, null
   ret i1 %cmp
 }
 
-define i1 @gep15_no_null_opt({ {}, [4 x {i8, i8}]}* %ptr, i32 %y) #0 {
+define i1 @gep15_no_null_opt(ptr %ptr, i32 %y) #0 {
 ; We can't prove this GEP is non-null.
 ; CHECK-LABEL: @gep15_no_null_opt(
-; CHECK-NEXT:    [[X:%.*]] = getelementptr inbounds { {}, [4 x { i8, i8 }] }, { {}, [4 x { i8, i8 }] }* [[PTR:%.*]], i32 0, i32 1, i32 [[Y:%.*]], i32 1
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[X]], null
+; CHECK-NEXT:    [[X:%.*]] = getelementptr inbounds { {}, [4 x { i8, i8 }] }, ptr [[PTR:%.*]], i32 0, i32 1, i32 [[Y:%.*]], i32 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[X]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %x = getelementptr inbounds { {}, [4 x {i8, i8}]}, { {}, [4 x {i8, i8}]}* %ptr, i32 0, i32 1, i32 %y, i32 1
-  %cmp = icmp eq i8* %x, null
+  %x = getelementptr inbounds { {}, [4 x {i8, i8}]}, ptr %ptr, i32 0, i32 1, i32 %y, i32 1
+  %cmp = icmp eq ptr %x, null
   ret i1 %cmp
 }
 
-define i1 @gep16(i8* %ptr, i32 %a) {
+define i1 @gep16(ptr %ptr, i32 %a) {
 ; CHECK-LABEL: @gep16(
 ; CHECK-NEXT:    ret i1 false
 ;
 ; We can prove this GEP is non-null because it is inbounds and because we know
 ; %b is non-zero even though we don't know its value.
   %b = or i32 %a, 1
-  %x = getelementptr inbounds i8, i8* %ptr, i32 %b
-  %cmp = icmp eq i8* %x, null
+  %x = getelementptr inbounds i8, ptr %ptr, i32 %b
+  %cmp = icmp eq ptr %x, null
   ret i1 %cmp
 }
 
-define i1 @gep16_no_null_opt(i8* %ptr, i32 %a) #0 {
+define i1 @gep16_no_null_opt(ptr %ptr, i32 %a) #0 {
 ; We can't prove this GEP is non-null.
 ; CHECK-LABEL: @gep16_no_null_opt(
 ; CHECK-NEXT:    [[B:%.*]] = or i32 [[A:%.*]], 1
-; CHECK-NEXT:    [[X:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i32 [[B]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[X]], null
+; CHECK-NEXT:    [[X:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i32 [[B]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[X]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %b = or i32 %a, 1
-  %x = getelementptr inbounds i8, i8* %ptr, i32 %b
-  %cmp = icmp eq i8* %x, null
+  %x = getelementptr inbounds i8, ptr %ptr, i32 %b
+  %cmp = icmp eq ptr %x, null
   ret i1 %cmp
 }
 
@@ -287,26 +274,25 @@ define i1 @gep17() {
 ; CHECK-NEXT:    ret i1 true
 ;
   %alloca = alloca i32, align 4
-  %bc = bitcast i32* %alloca to [4 x i8]*
-  %gep1 = getelementptr inbounds i32, i32* %alloca, i32 1
-  %pti1 = ptrtoint i32* %gep1 to i32
-  %gep2 = getelementptr inbounds [4 x i8], [4 x i8]* %bc, i32 0, i32 1
-  %pti2 = ptrtoint i8* %gep2 to i32
+  %gep1 = getelementptr inbounds i32, ptr %alloca, i32 1
+  %pti1 = ptrtoint ptr %gep1 to i32
+  %gep2 = getelementptr inbounds [4 x i8], ptr %alloca, i32 0, i32 1
+  %pti2 = ptrtoint ptr %gep2 to i32
   %cmp = icmp ugt i32 %pti1, %pti2
   ret i1 %cmp
 }
 
 ; Negative test: GEP inbounds may cross sign boundary.
-define i1 @gep_same_base_constant_indices(i8* %a) {
+define i1 @gep_same_base_constant_indices(ptr %a) {
 ; CHECK-LABEL: @gep_same_base_constant_indices(
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i64 1
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 10
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8* [[ARRAYIDX1]], [[ARRAYIDX2]]
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 10
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt ptr [[ARRAYIDX1]], [[ARRAYIDX2]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %arrayidx1 = getelementptr inbounds i8, i8* %a, i64 1
-  %arrayidx2 = getelementptr inbounds i8, i8* %a, i64 10
-  %cmp = icmp slt i8* %arrayidx1, %arrayidx2
+  %arrayidx1 = getelementptr inbounds i8, ptr %a, i64 1
+  %arrayidx2 = getelementptr inbounds i8, ptr %a, i64 10
+  %cmp = icmp slt ptr %arrayidx1, %arrayidx2
   ret i1 %cmp
 }
 
@@ -500,16 +486,16 @@ define i1 @or(i32 %x) {
 
 ; Do not simplify if we cannot guarantee that the ConstantExpr is a non-zero
 ; constant.
- at GV = common global i32* null
+ at GV = common global ptr null
 define i1 @or_constexp(i32 %x) {
 ; CHECK-LABEL: @or_constexp(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[O:%.*]] = or i32 [[X:%.*]], and (i32 ptrtoint (i32** @GV to i32), i32 32)
+; CHECK-NEXT:    [[O:%.*]] = or i32 [[X:%.*]], and (i32 ptrtoint (ptr @GV to i32), i32 32)
 ; CHECK-NEXT:    [[C:%.*]] = icmp eq i32 [[O]], 0
 ; CHECK-NEXT:    ret i1 [[C]]
 ;
 entry:
-  %0 = and i32 ptrtoint (i32** @GV to i32), 32
+  %0 = and i32 ptrtoint (ptr @GV to i32), 32
   %o = or i32 %x, %0
   %c = icmp eq i32 %o, 0
   ret i1 %c
@@ -1179,21 +1165,21 @@ define i1 @alloca_compare(i64 %idx) {
 ; CHECK-NEXT:    ret i1 false
 ;
   %sv = alloca { i32, i32, [124 x i32] }
-  %1 = getelementptr inbounds { i32, i32, [124 x i32] }, { i32, i32, [124 x i32] }* %sv, i32 0, i32 2, i64 %idx
-  %2 = icmp eq i32* %1, null
+  %1 = getelementptr inbounds { i32, i32, [124 x i32] }, ptr %sv, i32 0, i32 2, i64 %idx
+  %2 = icmp eq ptr %1, null
   ret i1 %2
 }
 
 define i1 @alloca_compare_no_null_opt(i64 %idx) #0 {
 ; CHECK-LABEL: @alloca_compare_no_null_opt(
 ; CHECK-NEXT:    [[SV:%.*]] = alloca { i32, i32, [124 x i32] }, align 8
-; CHECK-NEXT:    [[CMP:%.*]] = getelementptr inbounds { i32, i32, [124 x i32] }, { i32, i32, [124 x i32] }* [[SV]], i32 0, i32 2, i64 [[IDX:%.*]]
-; CHECK-NEXT:    [[X:%.*]] = icmp eq i32* [[CMP]], null
+; CHECK-NEXT:    [[CMP:%.*]] = getelementptr inbounds { i32, i32, [124 x i32] }, ptr [[SV]], i32 0, i32 2, i64 [[IDX:%.*]]
+; CHECK-NEXT:    [[X:%.*]] = icmp eq ptr [[CMP]], null
 ; CHECK-NEXT:    ret i1 [[X]]
 ;
   %sv = alloca { i32, i32, [124 x i32] }
-  %cmp = getelementptr inbounds { i32, i32, [124 x i32] }, { i32, i32, [124 x i32] }* %sv, i32 0, i32 2, i64 %idx
-  %X = icmp eq i32* %cmp, null
+  %cmp = getelementptr inbounds { i32, i32, [124 x i32] }, ptr %sv, i32 0, i32 2, i64 %idx
+  %X = icmp eq ptr %cmp, null
   ret i1 %X
 }
 ; PR12075
@@ -1201,15 +1187,15 @@ define i1 @infinite_gep() {
 ; CHECK-LABEL: @infinite_gep(
 ; CHECK-NEXT:    ret i1 true
 ; CHECK:       unreachableblock:
-; CHECK-NEXT:    [[X:%.*]] = getelementptr i32, i32* [[X]], i32 1
-; CHECK-NEXT:    [[Y:%.*]] = icmp eq i32* [[X]], null
+; CHECK-NEXT:    [[X:%.*]] = getelementptr i32, ptr [[X]], i32 1
+; CHECK-NEXT:    [[Y:%.*]] = icmp eq ptr [[X]], null
 ; CHECK-NEXT:    ret i1 [[Y]]
 ;
   ret i1 1
 
 unreachableblock:
-  %X = getelementptr i32, i32 *%X, i32 1
-  %Y = icmp eq i32* %X, null
+  %X = getelementptr i32, ptr%X, i32 1
+  %Y = icmp eq ptr %X, null
   ret i1 %Y
 }
 
@@ -1218,27 +1204,27 @@ unreachableblock:
 ; relies on restrictions against guessing an object's address and dereferencing.
 ; There are no restrictions against guessing an object's address and comparing.
 
-define i1 @alloca_argument_compare(i64* %arg) {
+define i1 @alloca_argument_compare(ptr %arg) {
 ; CHECK-LABEL: @alloca_argument_compare(
 ; CHECK-NEXT:    [[ALLOC:%.*]] = alloca i64, align 8
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64* [[ARG:%.*]], [[ALLOC]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[ARG:%.*]], [[ALLOC]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %alloc = alloca i64
-  %cmp = icmp eq i64* %arg, %alloc
+  %cmp = icmp eq ptr %arg, %alloc
   ret i1 %cmp
 }
 
 ; As above, but with the operands reversed.
 
-define i1 @alloca_argument_compare_swapped(i64* %arg) {
+define i1 @alloca_argument_compare_swapped(ptr %arg) {
 ; CHECK-LABEL: @alloca_argument_compare_swapped(
 ; CHECK-NEXT:    [[ALLOC:%.*]] = alloca i64, align 8
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64* [[ALLOC]], [[ARG:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[ALLOC]], [[ARG:%.*]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %alloc = alloca i64
-  %cmp = icmp eq i64* %alloc, %arg
+  %cmp = icmp eq ptr %alloc, %arg
   ret i1 %cmp
 }
 
@@ -1247,12 +1233,12 @@ define i1 @alloca_argument_compare_swapped(i64* %arg) {
 ; different from actual pointer inequality.
 
 @y = external global i32
-define zeroext i1 @external_compare(i32* noalias %x) {
+define zeroext i1 @external_compare(ptr noalias %x) {
 ; CHECK-LABEL: @external_compare(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32* [[X:%.*]], @y
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[X:%.*]], @y
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %cmp = icmp eq i32* %x, @y
+  %cmp = icmp eq ptr %x, @y
   ret i1 %cmp
 }
 
@@ -1263,46 +1249,46 @@ define i1 @alloca_gep(i64 %a, i64 %b) {
 ; We can prove this GEP is non-null because it is inbounds and the pointer
 ; is non-null.
   %strs = alloca [1000 x [1001 x i8]], align 16
-  %x = getelementptr inbounds [1000 x [1001 x i8]], [1000 x [1001 x i8]]* %strs, i64 0, i64 %a, i64 %b
-  %cmp = icmp eq i8* %x, null
+  %x = getelementptr inbounds [1000 x [1001 x i8]], ptr %strs, i64 0, i64 %a, i64 %b
+  %cmp = icmp eq ptr %x, null
   ret i1 %cmp
 }
 
 define i1 @alloca_gep_no_null_opt(i64 %a, i64 %b) #0 {
 ; CHECK-LABEL: @alloca_gep_no_null_opt(
 ; CHECK-NEXT:    [[STRS:%.*]] = alloca [1000 x [1001 x i8]], align 16
-; CHECK-NEXT:    [[X:%.*]] = getelementptr inbounds [1000 x [1001 x i8]], [1000 x [1001 x i8]]* [[STRS]], i64 0, i64 [[A:%.*]], i64 [[B:%.*]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[X]], null
+; CHECK-NEXT:    [[X:%.*]] = getelementptr inbounds [1000 x [1001 x i8]], ptr [[STRS]], i64 0, i64 [[A:%.*]], i64 [[B:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[X]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
 ; We can't prove this GEP is non-null.
   %strs = alloca [1000 x [1001 x i8]], align 16
-  %x = getelementptr inbounds [1000 x [1001 x i8]], [1000 x [1001 x i8]]* %strs, i64 0, i64 %a, i64 %b
-  %cmp = icmp eq i8* %x, null
+  %x = getelementptr inbounds [1000 x [1001 x i8]], ptr %strs, i64 0, i64 %a, i64 %b
+  %cmp = icmp eq ptr %x, null
   ret i1 %cmp
 }
 
-define i1 @non_inbounds_gep_compare(i64* %a) {
+define i1 @non_inbounds_gep_compare(ptr %a) {
 ; CHECK-LABEL: @non_inbounds_gep_compare(
 ; CHECK-NEXT:    ret i1 true
 ;
 ; Equality compares with non-inbounds GEPs can be folded.
-  %x = getelementptr i64, i64* %a, i64 42
-  %y = getelementptr inbounds i64, i64* %x, i64 -42
-  %z = getelementptr i64, i64* %a, i64 -42
-  %w = getelementptr inbounds i64, i64* %z, i64 42
-  %cmp = icmp eq i64* %y, %w
+  %x = getelementptr i64, ptr %a, i64 42
+  %y = getelementptr inbounds i64, ptr %x, i64 -42
+  %z = getelementptr i64, ptr %a, i64 -42
+  %w = getelementptr inbounds i64, ptr %z, i64 42
+  %cmp = icmp eq ptr %y, %w
   ret i1 %cmp
 }
 
-define i1 @non_inbounds_gep_compare2(i64* %a) {
+define i1 @non_inbounds_gep_compare2(ptr %a) {
 ; CHECK-LABEL: @non_inbounds_gep_compare2(
 ; CHECK-NEXT:    ret i1 true
 ;
 ; Equality compares with non-inbounds GEPs can be folded.
-  %x = getelementptr i64, i64* %a, i64 4294967297
-  %y = getelementptr i64, i64* %a, i64 1
-  %cmp = icmp eq i64* %y, %y
+  %x = getelementptr i64, ptr %a, i64 4294967297
+  %y = getelementptr i64, ptr %a, i64 1
+  %cmp = icmp eq ptr %y, %y
   ret i1 %cmp
 }
 
@@ -1435,115 +1421,115 @@ define i1 @lshr_ugt_false(i32 %a) {
   ret i1 %cmp
 }
 
-define i1 @nonnull_arg(i32* nonnull %i) {
+define i1 @nonnull_arg(ptr nonnull %i) {
 ; CHECK-LABEL: @nonnull_arg(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = icmp eq i32* %i, null
+  %cmp = icmp eq ptr %i, null
   ret i1 %cmp
 }
 
-define i1 @nonnull_arg_no_null_opt(i32* nonnull %i) #0 {
+define i1 @nonnull_arg_no_null_opt(ptr nonnull %i) #0 {
 ; CHECK-LABEL: @nonnull_arg_no_null_opt(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = icmp eq i32* %i, null
+  %cmp = icmp eq ptr %i, null
   ret i1 %cmp
 }
 
-define i1 @nonnull_deref_arg(i32* dereferenceable(4) %i) {
+define i1 @nonnull_deref_arg(ptr dereferenceable(4) %i) {
 ; CHECK-LABEL: @nonnull_deref_arg(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = icmp eq i32* %i, null
+  %cmp = icmp eq ptr %i, null
   ret i1 %cmp
 }
 
-define i1 @nonnull_deref_arg_no_null_opt(i32* dereferenceable(4) %i) #0 {
+define i1 @nonnull_deref_arg_no_null_opt(ptr dereferenceable(4) %i) #0 {
 ; CHECK-LABEL: @nonnull_deref_arg_no_null_opt(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32* [[I:%.*]], null
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[I:%.*]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %cmp = icmp eq i32* %i, null
+  %cmp = icmp eq ptr %i, null
   ret i1 %cmp
 }
-define i1 @nonnull_deref_as_arg(i32 addrspace(1)* dereferenceable(4) %i) {
+define i1 @nonnull_deref_as_arg(ptr addrspace(1) dereferenceable(4) %i) {
 ; CHECK-LABEL: @nonnull_deref_as_arg(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 addrspace(1)* [[I:%.*]], null
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr addrspace(1) [[I:%.*]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %cmp = icmp eq i32 addrspace(1)* %i, null
+  %cmp = icmp eq ptr addrspace(1) %i, null
   ret i1 %cmp
 }
 
-declare nonnull i32* @returns_nonnull_helper()
+declare nonnull ptr @returns_nonnull_helper()
 define i1 @returns_nonnull() {
 ; CHECK-LABEL: @returns_nonnull(
-; CHECK-NEXT:    [[CALL:%.*]] = call nonnull i32* @returns_nonnull_helper()
+; CHECK-NEXT:    [[CALL:%.*]] = call nonnull ptr @returns_nonnull_helper()
 ; CHECK-NEXT:    ret i1 false
 ;
-  %call = call nonnull i32* @returns_nonnull_helper()
-  %cmp = icmp eq i32* %call, null
+  %call = call nonnull ptr @returns_nonnull_helper()
+  %cmp = icmp eq ptr %call, null
   ret i1 %cmp
 }
 
-declare dereferenceable(4) i32* @returns_nonnull_deref_helper()
+declare dereferenceable(4) ptr @returns_nonnull_deref_helper()
 define i1 @returns_nonnull_deref() {
 ; CHECK-LABEL: @returns_nonnull_deref(
-; CHECK-NEXT:    [[CALL:%.*]] = call dereferenceable(4) i32* @returns_nonnull_deref_helper()
+; CHECK-NEXT:    [[CALL:%.*]] = call dereferenceable(4) ptr @returns_nonnull_deref_helper()
 ; CHECK-NEXT:    ret i1 false
 ;
-  %call = call dereferenceable(4) i32* @returns_nonnull_deref_helper()
-  %cmp = icmp eq i32* %call, null
+  %call = call dereferenceable(4) ptr @returns_nonnull_deref_helper()
+  %cmp = icmp eq ptr %call, null
   ret i1 %cmp
 }
 
 define i1 @returns_nonnull_deref_no_null_opt () #0 {
 ; CHECK-LABEL: @returns_nonnull_deref_no_null_opt(
-; CHECK-NEXT:    [[CALL:%.*]] = call dereferenceable(4) i32* @returns_nonnull_deref_helper()
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32* [[CALL]], null
+; CHECK-NEXT:    [[CALL:%.*]] = call dereferenceable(4) ptr @returns_nonnull_deref_helper()
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[CALL]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %call = call dereferenceable(4) i32* @returns_nonnull_deref_helper()
-  %cmp = icmp eq i32* %call, null
+  %call = call dereferenceable(4) ptr @returns_nonnull_deref_helper()
+  %cmp = icmp eq ptr %call, null
   ret i1 %cmp
 }
 
-declare dereferenceable(4) i32 addrspace(1)* @returns_nonnull_deref_as_helper()
+declare dereferenceable(4) ptr addrspace(1) @returns_nonnull_deref_as_helper()
 define i1 @returns_nonnull_as_deref() {
 ; CHECK-LABEL: @returns_nonnull_as_deref(
-; CHECK-NEXT:    [[CALL:%.*]] = call dereferenceable(4) i32 addrspace(1)* @returns_nonnull_deref_as_helper()
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 addrspace(1)* [[CALL]], null
+; CHECK-NEXT:    [[CALL:%.*]] = call dereferenceable(4) ptr addrspace(1) @returns_nonnull_deref_as_helper()
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr addrspace(1) [[CALL]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %call = call dereferenceable(4) i32 addrspace(1)* @returns_nonnull_deref_as_helper()
-  %cmp = icmp eq i32 addrspace(1)* %call, null
+  %call = call dereferenceable(4) ptr addrspace(1) @returns_nonnull_deref_as_helper()
+  %cmp = icmp eq ptr addrspace(1) %call, null
   ret i1 %cmp
 }
 
-define i1 @nonnull_load(i32** %addr) {
+define i1 @nonnull_load(ptr %addr) {
 ; CHECK-LABEL: @nonnull_load(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %ptr = load i32*, i32** %addr, !nonnull !{}
-  %cmp = icmp eq i32* %ptr, null
+  %ptr = load ptr, ptr %addr, !nonnull !{}
+  %cmp = icmp eq ptr %ptr, null
   ret i1 %cmp
 }
 
-define i1 @nonnull_load_as_outer(i32* addrspace(1)* %addr) {
+define i1 @nonnull_load_as_outer(ptr addrspace(1) %addr) {
 ; CHECK-LABEL: @nonnull_load_as_outer(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %ptr = load i32*, i32* addrspace(1)* %addr, !nonnull !{}
-  %cmp = icmp eq i32* %ptr, null
+  %ptr = load ptr, ptr addrspace(1) %addr, !nonnull !{}
+  %cmp = icmp eq ptr %ptr, null
   ret i1 %cmp
 }
-define i1 @nonnull_load_as_inner(i32 addrspace(1)** %addr) {
+define i1 @nonnull_load_as_inner(ptr %addr) {
 ; CHECK-LABEL: @nonnull_load_as_inner(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %ptr = load i32 addrspace(1)*, i32 addrspace(1)** %addr, !nonnull !{}
-  %cmp = icmp eq i32 addrspace(1)* %ptr, null
+  %ptr = load ptr addrspace(1), ptr %addr, !nonnull !{}
+  %cmp = icmp eq ptr addrspace(1) %ptr, null
   ret i1 %cmp
 }
 
@@ -2095,7 +2081,7 @@ define i1 @constant_fold_inttoptr_null() {
 ; CHECK-LABEL: @constant_fold_inttoptr_null(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %x = icmp eq i32* inttoptr (i64 32 to i32*), null
+  %x = icmp eq ptr inttoptr (i64 32 to ptr), null
   ret i1 %x
 }
 
@@ -2103,17 +2089,17 @@ define i1 @constant_fold_null_inttoptr() {
 ; CHECK-LABEL: @constant_fold_null_inttoptr(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %x = icmp eq i32* null, inttoptr (i64 32 to i32*)
+  %x = icmp eq ptr null, inttoptr (i64 32 to ptr)
   ret i1 %x
 }
 
-define i1 @cmp_through_addrspacecast(i32 addrspace(1)* %p1) {
+define i1 @cmp_through_addrspacecast(ptr addrspace(1) %p1) {
 ; CHECK-LABEL: @cmp_through_addrspacecast(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %p0 = addrspacecast i32 addrspace(1)* %p1 to i32*
-  %p0.1 = getelementptr inbounds i32, i32* %p0, i64 1
-  %cmp = icmp ne i32* %p0, %p0.1
+  %p0 = addrspacecast ptr addrspace(1) %p1 to ptr
+  %p0.1 = getelementptr inbounds i32, ptr %p0, i64 1
+  %cmp = icmp ne ptr %p0, %p0.1
   ret i1 %cmp
 }
 
@@ -2705,12 +2691,12 @@ define i1 @zero_sized_alloca1() {
 ; CHECK-LABEL: @zero_sized_alloca1(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, i32 0, align 4
 ; CHECK-NEXT:    [[B:%.*]] = alloca i32, i32 0, align 4
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i32* [[A]], [[B]]
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A]], [[B]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %a = alloca i32, i32 0
   %b = alloca i32, i32 0
-  %res = icmp ne i32* %a, %b
+  %res = icmp ne ptr %a, %b
   ret i1 %res
 }
 
@@ -2718,12 +2704,12 @@ define i1 @zero_sized_alloca2() {
 ; CHECK-LABEL: @zero_sized_alloca2(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, i32 0, align 4
 ; CHECK-NEXT:    [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i32* [[A]], [[B]]
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A]], [[B]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %a = alloca i32, i32 0
   %b = alloca i32
-  %res = icmp ne i32* %a, %b
+  %res = icmp ne ptr %a, %b
   ret i1 %res
 }
 
@@ -2733,98 +2719,96 @@ define i1 @scalar_vectors_are_non_empty() {
 ;
   %a = alloca <vscale x 2 x i32>
   %b = alloca <vscale x 2 x i32>
-  %res = icmp ne <vscale x 2 x i32>* %a, %b
+  %res = icmp ne ptr %a, %b
   ret i1 %res
 }
 
 ; Never equal
-define i1 @byval_args_inequal(i32* byval(i32) %a, i32* byval(i32) %b) {
+define i1 @byval_args_inequal(ptr byval(i32) %a, ptr byval(i32) %b) {
 ; CHECK-LABEL: @byval_args_inequal(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %res = icmp ne i32* %a, %b
+  %res = icmp ne ptr %a, %b
   ret i1 %res
 }
 
 ; Arguments can be adjacent on the stack
-define i1 @neg_args_adjacent(i32* byval(i32) %a, i32* byval(i32) %b) {
+define i1 @neg_args_adjacent(ptr byval(i32) %a, ptr byval(i32) %b) {
 ; CHECK-LABEL: @neg_args_adjacent(
-; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 1
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i32* [[A_OFF]], [[B:%.*]]
+; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i32, ptr [[A:%.*]], i32 1
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A_OFF]], [[B:%.*]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
-  %a.off = getelementptr i32, i32* %a, i32 1
-  %res = icmp ne i32* %a.off, %b
+  %a.off = getelementptr i32, ptr %a, i32 1
+  %res = icmp ne ptr %a.off, %b
   ret i1 %res
 }
 
 ; Never equal
-define i1 @test_byval_alloca_inequal(i32* byval(i32) %a) {
+define i1 @test_byval_alloca_inequal(ptr byval(i32) %a) {
 ; CHECK-LABEL: @test_byval_alloca_inequal(
 ; CHECK-NEXT:    ret i1 true
 ;
   %b = alloca i32
-  %res = icmp ne i32* %a, %b
+  %res = icmp ne ptr %a, %b
   ret i1 %res
 }
 
 ; Byval argument can be immediately before alloca, and crossing
 ; over is allowed.
-define i1 @neg_byval_alloca_adjacent(i32* byval(i32) %a) {
+define i1 @neg_byval_alloca_adjacent(ptr byval(i32) %a) {
 ; CHECK-LABEL: @neg_byval_alloca_adjacent(
 ; CHECK-NEXT:    [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 1
-; CHECK-NEXT:    [[RES:%.*]] = icmp ne i32* [[A_OFF]], [[B]]
+; CHECK-NEXT:    [[A_OFF:%.*]] = getelementptr i32, ptr [[A:%.*]], i32 1
+; CHECK-NEXT:    [[RES:%.*]] = icmp ne ptr [[A_OFF]], [[B]]
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %b = alloca i32
-  %a.off = getelementptr i32, i32* %a, i32 1
-  %res = icmp ne i32* %a.off, %b
+  %a.off = getelementptr i32, ptr %a, i32 1
+  %res = icmp ne ptr %a.off, %b
   ret i1 %res
 }
 
 @A = global i32 0
 @B = global i32 0
-@A.alias = alias i32, i32* @A
+@A.alias = alias i32, ptr @A
 
 define i1 @globals_inequal() {
 ; CHECK-LABEL: @globals_inequal(
 ; CHECK-NEXT:    ret i1 true
 ;
-  %res = icmp ne i32* @A, @B
+  %res = icmp ne ptr @A, @B
   ret i1 %res
 }
 
 ; TODO: Never equal
 define i1 @globals_offset_inequal() {
 ; CHECK-LABEL: @globals_offset_inequal(
-; CHECK-NEXT:    ret i1 icmp ne (i8* getelementptr (i8, i8* bitcast (i32* @A to i8*), i32 1), i8* getelementptr (i8, i8* bitcast (i32* @B to i8*), i32 1))
+; CHECK-NEXT:    ret i1 icmp ne (ptr getelementptr inbounds (i8, ptr @A, i32 1), ptr getelementptr inbounds (i8, ptr @B, i32 1))
 ;
-  %a.cast = bitcast i32* @A to i8*
-  %a.off = getelementptr i8, i8* %a.cast, i32 1
-  %b.cast = bitcast i32* @B to i8*
-  %b.off = getelementptr i8, i8* %b.cast, i32 1
-  %res = icmp ne i8* %a.off, %b.off
+  %a.off = getelementptr i8, ptr @A, i32 1
+  %b.off = getelementptr i8, ptr @B, i32 1
+  %res = icmp ne ptr %a.off, %b.off
   ret i1 %res
 }
 
 
 ; Never equal
-define i1 @test_byval_global_inequal(i32* byval(i32) %a) {
+define i1 @test_byval_global_inequal(ptr byval(i32) %a) {
 ; CHECK-LABEL: @test_byval_global_inequal(
 ; CHECK-NEXT:    ret i1 true
 ;
   %b = alloca i32
-  %res = icmp ne i32* %a, @B
+  %res = icmp ne ptr %a, @B
   ret i1 %res
 }
 
 
 define i1 @neg_global_alias() {
 ; CHECK-LABEL: @neg_global_alias(
-; CHECK-NEXT:    ret i1 icmp ne (i32* @A, i32* @A.alias)
+; CHECK-NEXT:    ret i1 icmp ne (ptr @A, ptr @A.alias)
 ;
-  %res = icmp ne i32* @A, @A.alias
+  %res = icmp ne ptr @A, @A.alias
   ret i1 %res
 }
 

diff --git a/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll b/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll
index 0e56be8a18408..e45aa3fd09ce0 100644
--- a/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll
+++ b/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll
@@ -15,32 +15,32 @@ define i32 @test_bswap(i32 %a) nounwind {
   ret i32 %tmp4
 }
 
-define void @powi(double %V, double *%P) {
+define void @powi(double %V, ptr%P) {
 ; CHECK-LABEL: @powi(
-; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P:%.*]], align 8
-; CHECK-NEXT:    store volatile double [[V:%.*]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, ptr [[P:%.*]], align 8
+; CHECK-NEXT:    store volatile double [[V:%.*]], ptr [[P]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %B = tail call double @llvm.powi.f64.i32(double %V, i32 0) nounwind
-  store volatile double %B, double* %P
+  store volatile double %B, ptr %P
 
   %C = tail call double @llvm.powi.f64.i32(double %V, i32 1) nounwind
-  store volatile double %C, double* %P
+  store volatile double %C, ptr %P
 
   ret void
 }
 
-define void @powi_i16(float %V, float *%P) {
+define void @powi_i16(float %V, ptr%P) {
 ; CHECK-LABEL: @powi_i16(
-; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P:%.*]], align 4
-; CHECK-NEXT:    store volatile float [[V:%.*]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile float [[V:%.*]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %B = tail call float @llvm.powi.f32.i16(float %V, i16 0) nounwind
-  store volatile float %B, float* %P
+  store volatile float %B, ptr %P
 
   %C = tail call float @llvm.powi.f32.i16(float %V, i16 1) nounwind
-  store volatile float %C, float* %P
+  store volatile float %C, ptr %P
 
   ret void
 }

diff --git a/llvm/test/Transforms/InstSimplify/freeze-noundef.ll b/llvm/test/Transforms/InstSimplify/freeze-noundef.ll
index 3cd960311c998..f51b92d2f4188 100644
--- a/llvm/test/Transforms/InstSimplify/freeze-noundef.ll
+++ b/llvm/test/Transforms/InstSimplify/freeze-noundef.ll
@@ -105,22 +105,22 @@ define i1 @used_by_fncall(i1 %x) {
   ret i1 %f
 }
 
-define i32 @noundef_metadata(i32* %p) {
+define i32 @noundef_metadata(ptr %p) {
 ; CHECK-LABEL: @noundef_metadata(
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 4, !noundef !0
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4, !noundef !0
 ; CHECK-NEXT:    ret i32 [[V]]
 ;
-  %v = load i32, i32* %p, !noundef !{}
+  %v = load i32, ptr %p, !noundef !{}
   %v.fr = freeze i32 %v
   ret i32 %v.fr
 }
 
-define {i8, i32} @noundef_metadata2({i8, i32}* %p) {
+define {i8, i32} @noundef_metadata2(ptr %p) {
 ; CHECK-LABEL: @noundef_metadata2(
-; CHECK-NEXT:    [[V:%.*]] = load { i8, i32 }, { i8, i32 }* [[P:%.*]], align 4, !noundef !0
+; CHECK-NEXT:    [[V:%.*]] = load { i8, i32 }, ptr [[P:%.*]], align 4, !noundef !0
 ; CHECK-NEXT:    ret { i8, i32 } [[V]]
 ;
-  %v = load {i8, i32}, {i8, i32}* %p, !noundef !{}
+  %v = load {i8, i32}, ptr %p, !noundef !{}
   %v.fr = freeze {i8, i32} %v
   ret {i8, i32} %v.fr
 }

diff --git a/llvm/test/Transforms/InstSimplify/freeze.ll b/llvm/test/Transforms/InstSimplify/freeze.ll
index dd5842710a08f..6c4b16076e724 100644
--- a/llvm/test/Transforms/InstSimplify/freeze.ll
+++ b/llvm/test/Transforms/InstSimplify/freeze.ll
@@ -29,28 +29,28 @@ define float @make_const2() {
 
 @glb = constant i32 0
 
-define i32* @make_const_glb() {
+define ptr @make_const_glb() {
 ; CHECK-LABEL: @make_const_glb(
-; CHECK-NEXT:    ret i32* @glb
+; CHECK-NEXT:    ret ptr @glb
 ;
-  %k = freeze i32* @glb
-  ret i32* %k
+  %k = freeze ptr @glb
+  ret ptr %k
 }
 
-define i32()* @make_const_fn() {
+define ptr @make_const_fn() {
 ; CHECK-LABEL: @make_const_fn(
-; CHECK-NEXT:    ret i32 ()* @make_const
+; CHECK-NEXT:    ret ptr @make_const
 ;
-  %k = freeze i32()* @make_const
-  ret i32()* %k
+  %k = freeze ptr @make_const
+  ret ptr %k
 }
 
-define i32* @make_const_null() {
+define ptr @make_const_null() {
 ; CHECK-LABEL: @make_const_null(
-; CHECK-NEXT:    ret i32* null
+; CHECK-NEXT:    ret ptr null
 ;
-  %k = freeze i32* null
-  ret i32* %k
+  %k = freeze ptr null
+  ret ptr %k
 }
 
 define <2 x i32> @constvector() {
@@ -115,34 +115,34 @@ define <2 x float> @constvector_FP_noopt() {
 
 define float @constant_expr() {
 ; CHECK-LABEL: @constant_expr(
-; CHECK-NEXT:    ret float bitcast (i32 ptrtoint (i16* @g to i32) to float)
+; CHECK-NEXT:    ret float bitcast (i32 ptrtoint (ptr @g to i32) to float)
 ;
-  %r = freeze float bitcast (i32 ptrtoint (i16* @g to i32) to float)
+  %r = freeze float bitcast (i32 ptrtoint (ptr @g to i32) to float)
   ret float %r
 }
 
-define i8* @constant_expr2() {
+define ptr @constant_expr2() {
 ; CHECK-LABEL: @constant_expr2(
-; CHECK-NEXT:    ret i8* bitcast (i16* @g to i8*)
+; CHECK-NEXT:    ret ptr @g
 ;
-  %r = freeze i8* bitcast (i16* @g to i8*)
-  ret i8* %r
+  %r = freeze ptr @g
+  ret ptr %r
 }
 
-define i32* @constant_expr3() {
+define ptr @constant_expr3() {
 ; CHECK-LABEL: @constant_expr3(
-; CHECK-NEXT:    ret i32* getelementptr (i32, i32* @glb, i64 3)
+; CHECK-NEXT:    ret ptr getelementptr (i32, ptr @glb, i64 3)
 ;
-  %r = freeze i32* getelementptr (i32, i32* @glb, i64 3)
-  ret i32* %r
+  %r = freeze ptr getelementptr (i32, ptr @glb, i64 3)
+  ret ptr %r
 }
 
 define i64 @ptrdiff() {
 ; CHECK-LABEL: @ptrdiff(
-; CHECK-NEXT:    ret i64 sub (i64 ptrtoint (i16* @g to i64), i64 ptrtoint (i16* @g2 to i64))
+; CHECK-NEXT:    ret i64 sub (i64 ptrtoint (ptr @g to i64), i64 ptrtoint (ptr @g2 to i64))
 ;
-  %i = ptrtoint i16* @g to i64
-  %i2 = ptrtoint i16* @g2 to i64
+  %i = ptrtoint ptr @g to i64
+  %i2 = ptrtoint ptr @g2 to i64
   %diff = sub i64 %i, %i2
   %r = freeze i64 %diff
   ret i64 %r
@@ -152,162 +152,158 @@ define i64 @ptrdiff() {
 
 define <2 x i31> @vector_element_constant_expr() {
 ; CHECK-LABEL: @vector_element_constant_expr(
-; CHECK-NEXT:    [[R:%.*]] = freeze <2 x i31> <i31 34, i31 ptrtoint (i16* @g to i31)>
+; CHECK-NEXT:    [[R:%.*]] = freeze <2 x i31> <i31 34, i31 ptrtoint (ptr @g to i31)>
 ; CHECK-NEXT:    ret <2 x i31> [[R]]
 ;
-  %r = freeze <2 x i31> <i31 34, i31 ptrtoint (i16* @g to i31)>
+  %r = freeze <2 x i31> <i31 34, i31 ptrtoint (ptr @g to i31)>
   ret <2 x i31> %r
 }
 
 define void @alloca() {
 ; CHECK-LABEL: @alloca(
 ; CHECK-NEXT:    [[P:%.*]] = alloca i8, align 1
-; CHECK-NEXT:    call void @f3(i8* [[P]])
+; CHECK-NEXT:    call void @f3(ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
   %p = alloca i8
-  %y = freeze i8* %p
-  call void @f3(i8* %y)
+  %y = freeze ptr %p
+  call void @f3(ptr %y)
   ret void
 }
 
-define i8* @gep() {
+define ptr @gep() {
 ; CHECK-LABEL: @gep(
 ; CHECK-NEXT:    [[P:%.*]] = alloca [4 x i8], align 1
-; CHECK-NEXT:    [[Q:%.*]] = getelementptr [4 x i8], [4 x i8]* [[P]], i32 0, i32 6
-; CHECK-NEXT:    ret i8* [[Q]]
+; CHECK-NEXT:    [[Q:%.*]] = getelementptr [4 x i8], ptr [[P]], i32 0, i32 6
+; CHECK-NEXT:    ret ptr [[Q]]
 ;
   %p = alloca [4 x i8]
-  %q = getelementptr [4 x i8], [4 x i8]* %p, i32 0, i32 6
-  %q2 = freeze i8* %q
-  ret i8* %q2
+  %q = getelementptr [4 x i8], ptr %p, i32 0, i32 6
+  %q2 = freeze ptr %q
+  ret ptr %q2
 }
 
-define i8* @gep_noopt(i32 %arg) {
+define ptr @gep_noopt(i32 %arg) {
 ; CHECK-LABEL: @gep_noopt(
-; CHECK-NEXT:    [[Q:%.*]] = getelementptr [4 x i8], [4 x i8]* null, i32 0, i32 [[ARG:%.*]]
-; CHECK-NEXT:    [[Q2:%.*]] = freeze i8* [[Q]]
-; CHECK-NEXT:    ret i8* [[Q2]]
+; CHECK-NEXT:    [[Q:%.*]] = getelementptr [4 x i8], ptr null, i32 0, i32 [[ARG:%.*]]
+; CHECK-NEXT:    [[Q2:%.*]] = freeze ptr [[Q]]
+; CHECK-NEXT:    ret ptr [[Q2]]
 ;
-  %q = getelementptr [4 x i8], [4 x i8]* null, i32 0, i32 %arg
-  %q2 = freeze i8* %q
-  ret i8* %q2
+  %q = getelementptr [4 x i8], ptr null, i32 0, i32 %arg
+  %q2 = freeze ptr %q
+  ret ptr %q2
 }
 
-define i8* @gep_inbounds() {
+define ptr @gep_inbounds() {
 ; CHECK-LABEL: @gep_inbounds(
 ; CHECK-NEXT:    [[P:%.*]] = alloca [4 x i8], align 1
-; CHECK-NEXT:    [[Q:%.*]] = getelementptr inbounds [4 x i8], [4 x i8]* [[P]], i32 0, i32 0
-; CHECK-NEXT:    ret i8* [[Q]]
+; CHECK-NEXT:    ret ptr [[P]]
 ;
   %p = alloca [4 x i8]
-  %q = getelementptr inbounds [4 x i8], [4 x i8]* %p, i32 0, i32 0
-  %q2 = freeze i8* %q
-  ret i8* %q2
+  %q2 = freeze ptr %p
+  ret ptr %q2
 }
 
-define i8* @gep_inbounds_noopt(i32 %arg) {
+define ptr @gep_inbounds_noopt(i32 %arg) {
 ; CHECK-LABEL: @gep_inbounds_noopt(
 ; CHECK-NEXT:    [[P:%.*]] = alloca [4 x i8], align 1
-; CHECK-NEXT:    [[Q:%.*]] = getelementptr inbounds [4 x i8], [4 x i8]* [[P]], i32 0, i32 [[ARG:%.*]]
-; CHECK-NEXT:    [[Q2:%.*]] = freeze i8* [[Q]]
-; CHECK-NEXT:    ret i8* [[Q2]]
+; CHECK-NEXT:    [[Q:%.*]] = getelementptr inbounds [4 x i8], ptr [[P]], i32 0, i32 [[ARG:%.*]]
+; CHECK-NEXT:    [[Q2:%.*]] = freeze ptr [[Q]]
+; CHECK-NEXT:    ret ptr [[Q2]]
 ;
   %p = alloca [4 x i8]
-  %q = getelementptr inbounds [4 x i8], [4 x i8]* %p, i32 0, i32 %arg
-  %q2 = freeze i8* %q
-  ret i8* %q2
+  %q = getelementptr inbounds [4 x i8], ptr %p, i32 0, i32 %arg
+  %q2 = freeze ptr %q
+  ret ptr %q2
 }
 
-define i32* @gep_inbounds_null() {
+define ptr @gep_inbounds_null() {
 ; CHECK-LABEL: @gep_inbounds_null(
-; CHECK-NEXT:    ret i32* null
+; CHECK-NEXT:    ret ptr null
 ;
-  %p = getelementptr inbounds i32, i32* null, i32 0
-  %k = freeze i32* %p
-  ret i32* %k
+  %k = freeze ptr null
+  ret ptr %k
 }
 
-define i32* @gep_inbounds_null_noopt(i32* %p) {
+define ptr @gep_inbounds_null_noopt(ptr %p) {
 ; CHECK-LABEL: @gep_inbounds_null_noopt(
-; CHECK-NEXT:    [[K:%.*]] = freeze i32* [[P:%.*]]
-; CHECK-NEXT:    ret i32* [[K]]
+; CHECK-NEXT:    [[K:%.*]] = freeze ptr [[P:%.*]]
+; CHECK-NEXT:    ret ptr [[K]]
 ;
-  %q = getelementptr inbounds i32, i32* %p, i32 0
-  %k = freeze i32* %q
-  ret i32* %k
+  %k = freeze ptr %p
+  ret ptr %k
 }
 
-define i8* @load_ptr(i8* %ptr) {
+define ptr @load_ptr(ptr %ptr) {
 ; CHECK-LABEL: @load_ptr(
-; CHECK-NEXT:    [[V:%.*]] = load i8, i8* [[PTR:%.*]], align 1
+; CHECK-NEXT:    [[V:%.*]] = load i8, ptr [[PTR:%.*]], align 1
 ; CHECK-NEXT:    call void @f4(i8 [[V]])
-; CHECK-NEXT:    ret i8* [[PTR]]
+; CHECK-NEXT:    ret ptr [[PTR]]
 ;
-  %v = load i8, i8* %ptr
-  %q = freeze i8* %ptr
+  %v = load i8, ptr %ptr
+  %q = freeze ptr %ptr
   call void @f4(i8 %v) ; prevents %v from being DCEd
-  ret i8* %q
+  ret ptr %q
 }
 
-define i8* @store_ptr(i8* %ptr) {
+define ptr @store_ptr(ptr %ptr) {
 ; CHECK-LABEL: @store_ptr(
-; CHECK-NEXT:    store i8 0, i8* [[PTR:%.*]], align 1
-; CHECK-NEXT:    ret i8* [[PTR]]
+; CHECK-NEXT:    store i8 0, ptr [[PTR:%.*]], align 1
+; CHECK-NEXT:    ret ptr [[PTR]]
 ;
-  store i8 0, i8* %ptr
-  %q = freeze i8* %ptr
-  ret i8* %q
+  store i8 0, ptr %ptr
+  %q = freeze ptr %ptr
+  ret ptr %q
 }
 
-define i8* @call_noundef_ptr(i8* %ptr) {
+define ptr @call_noundef_ptr(ptr %ptr) {
 ; CHECK-LABEL: @call_noundef_ptr(
-; CHECK-NEXT:    call void @f3(i8* noundef [[PTR:%.*]])
-; CHECK-NEXT:    ret i8* [[PTR]]
+; CHECK-NEXT:    call void @f3(ptr noundef [[PTR:%.*]])
+; CHECK-NEXT:    ret ptr [[PTR]]
 ;
-  call void @f3(i8* noundef %ptr)
-  %q = freeze i8* %ptr
-  ret i8* %q
+  call void @f3(ptr noundef %ptr)
+  %q = freeze ptr %ptr
+  ret ptr %q
 }
 
-define i8* @invoke_noundef_ptr(i8* %ptr) personality i8 1 {
+define ptr @invoke_noundef_ptr(ptr %ptr) personality i8 1 {
 ; CHECK-LABEL: @invoke_noundef_ptr(
-; CHECK-NEXT:    invoke void @f3(i8* noundef [[PTR:%.*]])
+; CHECK-NEXT:    invoke void @f3(ptr noundef [[PTR:%.*]])
 ; CHECK-NEXT:    to label [[NORMAL:%.*]] unwind label [[UNWIND:%.*]]
 ; CHECK:       normal:
-; CHECK-NEXT:    ret i8* [[PTR]]
+; CHECK-NEXT:    ret ptr [[PTR]]
 ; CHECK:       unwind:
-; CHECK-NEXT:    [[TMP1:%.*]] = landingpad i8*
+; CHECK-NEXT:    [[TMP1:%.*]] = landingpad ptr
 ; CHECK-NEXT:    cleanup
-; CHECK-NEXT:    resume i8* [[PTR]]
+; CHECK-NEXT:    resume ptr [[PTR]]
 ;
-  %q = freeze i8* %ptr
-  invoke void @f3(i8* noundef %ptr) to label %normal unwind label %unwind
+  %q = freeze ptr %ptr
+  invoke void @f3(ptr noundef %ptr) to label %normal unwind label %unwind
 normal:
-  ret i8* %q
+  ret ptr %q
 unwind:
-  landingpad i8* cleanup
-  resume i8* %q
+  landingpad ptr cleanup
+  resume ptr %q
 }
 
-define i8* @cmpxchg_ptr(i8* %ptr) {
+define ptr @cmpxchg_ptr(ptr %ptr) {
 ; CHECK-LABEL: @cmpxchg_ptr(
-; CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg i8* [[PTR:%.*]], i8 1, i8 2 acq_rel monotonic, align 1
-; CHECK-NEXT:    ret i8* [[PTR]]
+; CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i8 1, i8 2 acq_rel monotonic, align 1
+; CHECK-NEXT:    ret ptr [[PTR]]
 ;
-  cmpxchg i8* %ptr, i8 1, i8 2 acq_rel monotonic
-  %q = freeze i8* %ptr
-  ret i8* %q
+  cmpxchg ptr %ptr, i8 1, i8 2 acq_rel monotonic
+  %q = freeze ptr %ptr
+  ret ptr %q
 }
 
-define i8* @atomicrmw_ptr(i8* %ptr) {
+define ptr @atomicrmw_ptr(ptr %ptr) {
 ; CHECK-LABEL: @atomicrmw_ptr(
-; CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw add i8* [[PTR:%.*]], i8 1 acquire, align 1
-; CHECK-NEXT:    ret i8* [[PTR]]
+; CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw add ptr [[PTR:%.*]], i8 1 acquire, align 1
+; CHECK-NEXT:    ret ptr [[PTR]]
 ;
-  atomicrmw add i8* %ptr, i8 1 acquire
-  %q = freeze i8* %ptr
-  ret i8* %q
+  atomicrmw add ptr %ptr, i8 1 acquire
+  %q = freeze ptr %ptr
+  ret ptr %q
 }
 
 define i1 @icmp(i32 %a, i32 %b) {
@@ -502,5 +498,5 @@ B:
 }
 declare void @f1(i1)
 declare void @f2()
-declare void @f3(i8*)
+declare void @f3(ptr)
 declare void @f4(i8)

diff --git a/llvm/test/Transforms/InstSimplify/gc_relocate.ll b/llvm/test/Transforms/InstSimplify/gc_relocate.ll
index 60a852f391d69..3f6de8b3845a2 100644
--- a/llvm/test/Transforms/InstSimplify/gc_relocate.ll
+++ b/llvm/test/Transforms/InstSimplify/gc_relocate.ll
@@ -3,17 +3,17 @@
 
 declare void @func()
 
-define void @dead_relocate(i32 addrspace(1)* %in) gc "statepoint-example" {
+define void @dead_relocate(ptr addrspace(1) %in) gc "statepoint-example" {
 ; CHECK-LABEL: @dead_relocate(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[IN:%.*]]) ]
+; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[IN:%.*]]) ]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %in)]
-  %a = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 0)
+  %safepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %in)]
+  %a = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 0)
   ret void
 }
 
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
-declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32, i32)
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
+declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32)

diff --git a/llvm/test/Transforms/InstSimplify/gep.ll b/llvm/test/Transforms/InstSimplify/gep.ll
index a6764eb1142dc..6f7db7b856345 100644
--- a/llvm/test/Transforms/InstSimplify/gep.ll
+++ b/llvm/test/Transforms/InstSimplify/gep.ll
@@ -5,356 +5,356 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 
 %struct.A = type { [7 x i8] }
 
-define %struct.A* @test1(%struct.A* %b, %struct.A* %e) {
+define ptr @test1(ptr %b, ptr %e) {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    [[E_PTR:%.*]] = ptrtoint %struct.A* [[E:%.*]] to i64
-; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint %struct.A* [[B:%.*]] to i64
+; CHECK-NEXT:    [[E_PTR:%.*]] = ptrtoint ptr [[E:%.*]] to i64
+; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint ptr [[B:%.*]] to i64
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i64 [[E_PTR]], [[B_PTR]]
 ; CHECK-NEXT:    [[SDIV:%.*]] = sdiv exact i64 [[SUB]], 7
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], %struct.A* [[B]], i64 [[SDIV]]
-; CHECK-NEXT:    ret %struct.A* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], ptr [[B]], i64 [[SDIV]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
-  %e_ptr = ptrtoint %struct.A* %e to i64
-  %b_ptr = ptrtoint %struct.A* %b to i64
+  %e_ptr = ptrtoint ptr %e to i64
+  %b_ptr = ptrtoint ptr %b to i64
   %sub = sub i64 %e_ptr, %b_ptr
   %sdiv = sdiv exact i64 %sub, 7
-  %gep = getelementptr inbounds %struct.A, %struct.A* %b, i64 %sdiv
-  ret %struct.A* %gep
+  %gep = getelementptr inbounds %struct.A, ptr %b, i64 %sdiv
+  ret ptr %gep
 }
 
-define i8* @test2(i8* %b, i8* %e) {
+define ptr @test2(ptr %b, ptr %e) {
 ; CHECK-LABEL: @test2(
-; CHECK-NEXT:    [[E_PTR:%.*]] = ptrtoint i8* [[E:%.*]] to i64
-; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint i8* [[B:%.*]] to i64
+; CHECK-NEXT:    [[E_PTR:%.*]] = ptrtoint ptr [[E:%.*]] to i64
+; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint ptr [[B:%.*]] to i64
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i64 [[E_PTR]], [[B_PTR]]
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, i8* [[B]], i64 [[SUB]]
-; CHECK-NEXT:    ret i8* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[SUB]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
-  %e_ptr = ptrtoint i8* %e to i64
-  %b_ptr = ptrtoint i8* %b to i64
+  %e_ptr = ptrtoint ptr %e to i64
+  %b_ptr = ptrtoint ptr %b to i64
   %sub = sub i64 %e_ptr, %b_ptr
-  %gep = getelementptr inbounds i8, i8* %b, i64 %sub
-  ret i8* %gep
+  %gep = getelementptr inbounds i8, ptr %b, i64 %sub
+  ret ptr %gep
 }
 
-define i64* @test3(i64* %b, i64* %e) {
+define ptr @test3(ptr %b, ptr %e) {
 ; CHECK-LABEL: @test3(
-; CHECK-NEXT:    [[E_PTR:%.*]] = ptrtoint i64* [[E:%.*]] to i64
-; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint i64* [[B:%.*]] to i64
+; CHECK-NEXT:    [[E_PTR:%.*]] = ptrtoint ptr [[E:%.*]] to i64
+; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint ptr [[B:%.*]] to i64
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i64 [[E_PTR]], [[B_PTR]]
 ; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i64 [[SUB]], 3
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[ASHR]]
-; CHECK-NEXT:    ret i64* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[ASHR]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
-  %e_ptr = ptrtoint i64* %e to i64
-  %b_ptr = ptrtoint i64* %b to i64
+  %e_ptr = ptrtoint ptr %e to i64
+  %b_ptr = ptrtoint ptr %b to i64
   %sub = sub i64 %e_ptr, %b_ptr
   %ashr = ashr exact i64 %sub, 3
-  %gep = getelementptr inbounds i64, i64* %b, i64 %ashr
-  ret i64* %gep
+  %gep = getelementptr inbounds i64, ptr %b, i64 %ashr
+  ret ptr %gep
 }
 
 ; The following tests should not be folded to null, because this would
 ; lose provenance of the base pointer %b.
 
-define %struct.A* @test4(%struct.A* %b) {
+define ptr @test4(ptr %b) {
 ; CHECK-LABEL: @test4(
-; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint %struct.A* [[B:%.*]] to i64
+; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint ptr [[B:%.*]] to i64
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i64 0, [[B_PTR]]
 ; CHECK-NEXT:    [[SDIV:%.*]] = sdiv exact i64 [[SUB]], 7
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [[STRUCT_A:%.*]], %struct.A* [[B]], i64 [[SDIV]]
-; CHECK-NEXT:    ret %struct.A* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [[STRUCT_A:%.*]], ptr [[B]], i64 [[SDIV]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
-  %b_ptr = ptrtoint %struct.A* %b to i64
+  %b_ptr = ptrtoint ptr %b to i64
   %sub = sub i64 0, %b_ptr
   %sdiv = sdiv exact i64 %sub, 7
-  %gep = getelementptr %struct.A, %struct.A* %b, i64 %sdiv
-  ret %struct.A* %gep
+  %gep = getelementptr %struct.A, ptr %b, i64 %sdiv
+  ret ptr %gep
 }
 
-define %struct.A* @test4_inbounds(%struct.A* %b) {
+define ptr @test4_inbounds(ptr %b) {
 ; CHECK-LABEL: @test4_inbounds(
-; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint %struct.A* [[B:%.*]] to i64
+; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint ptr [[B:%.*]] to i64
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i64 0, [[B_PTR]]
 ; CHECK-NEXT:    [[SDIV:%.*]] = sdiv exact i64 [[SUB]], 7
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], %struct.A* [[B]], i64 [[SDIV]]
-; CHECK-NEXT:    ret %struct.A* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], ptr [[B]], i64 [[SDIV]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
-  %b_ptr = ptrtoint %struct.A* %b to i64
+  %b_ptr = ptrtoint ptr %b to i64
   %sub = sub i64 0, %b_ptr
   %sdiv = sdiv exact i64 %sub, 7
-  %gep = getelementptr inbounds %struct.A, %struct.A* %b, i64 %sdiv
-  ret %struct.A* %gep
+  %gep = getelementptr inbounds %struct.A, ptr %b, i64 %sdiv
+  ret ptr %gep
 }
 
-define i8* @test5(i8* %b) {
+define ptr @test5(ptr %b) {
 ; CHECK-LABEL: @test5(
-; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint i8* [[B:%.*]] to i64
+; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint ptr [[B:%.*]] to i64
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i64 0, [[B_PTR]]
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, i8* [[B]], i64 [[SUB]]
-; CHECK-NEXT:    ret i8* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[SUB]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
-  %b_ptr = ptrtoint i8* %b to i64
+  %b_ptr = ptrtoint ptr %b to i64
   %sub = sub i64 0, %b_ptr
-  %gep = getelementptr i8, i8* %b, i64 %sub
-  ret i8* %gep
+  %gep = getelementptr i8, ptr %b, i64 %sub
+  ret ptr %gep
 }
 
-define i8* @test5_inbounds(i8* %b) {
+define ptr @test5_inbounds(ptr %b) {
 ; CHECK-LABEL: @test5_inbounds(
-; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint i8* [[B:%.*]] to i64
+; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint ptr [[B:%.*]] to i64
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i64 0, [[B_PTR]]
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, i8* [[B]], i64 [[SUB]]
-; CHECK-NEXT:    ret i8* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[SUB]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
-  %b_ptr = ptrtoint i8* %b to i64
+  %b_ptr = ptrtoint ptr %b to i64
   %sub = sub i64 0, %b_ptr
-  %gep = getelementptr inbounds i8, i8* %b, i64 %sub
-  ret i8* %gep
+  %gep = getelementptr inbounds i8, ptr %b, i64 %sub
+  ret ptr %gep
 }
 
-define i64* @test6(i64* %b) {
+define ptr @test6(ptr %b) {
 ; CHECK-LABEL: @test6(
-; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint i64* [[B:%.*]] to i64
+; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint ptr [[B:%.*]] to i64
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i64 0, [[B_PTR]]
 ; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i64 [[SUB]], 3
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[ASHR]]
-; CHECK-NEXT:    ret i64* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[ASHR]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
-  %b_ptr = ptrtoint i64* %b to i64
+  %b_ptr = ptrtoint ptr %b to i64
   %sub = sub i64 0, %b_ptr
   %ashr = ashr exact i64 %sub, 3
-  %gep = getelementptr i64, i64* %b, i64 %ashr
-  ret i64* %gep
+  %gep = getelementptr i64, ptr %b, i64 %ashr
+  ret ptr %gep
 }
 
-define i64* @test6_inbounds(i64* %b) {
+define ptr @test6_inbounds(ptr %b) {
 ; CHECK-LABEL: @test6_inbounds(
-; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint i64* [[B:%.*]] to i64
+; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint ptr [[B:%.*]] to i64
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i64 0, [[B_PTR]]
 ; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i64 [[SUB]], 3
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[ASHR]]
-; CHECK-NEXT:    ret i64* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[ASHR]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
-  %b_ptr = ptrtoint i64* %b to i64
+  %b_ptr = ptrtoint ptr %b to i64
   %sub = sub i64 0, %b_ptr
   %ashr = ashr exact i64 %sub, 3
-  %gep = getelementptr inbounds i64, i64* %b, i64 %ashr
-  ret i64* %gep
+  %gep = getelementptr inbounds i64, ptr %b, i64 %ashr
+  ret ptr %gep
 }
 
-define i8* @test7(i8* %b, i8** %e) {
+define ptr @test7(ptr %b, ptr %e) {
 ; CHECK-LABEL: @test7(
-; CHECK-NEXT:    [[E_PTR:%.*]] = ptrtoint i8** [[E:%.*]] to i64
-; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint i8* [[B:%.*]] to i64
+; CHECK-NEXT:    [[E_PTR:%.*]] = ptrtoint ptr [[E:%.*]] to i64
+; CHECK-NEXT:    [[B_PTR:%.*]] = ptrtoint ptr [[B:%.*]] to i64
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i64 [[E_PTR]], [[B_PTR]]
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, i8* [[B]], i64 [[SUB]]
-; CHECK-NEXT:    ret i8* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[SUB]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
-  %e_ptr = ptrtoint i8** %e to i64
-  %b_ptr = ptrtoint i8* %b to i64
+  %e_ptr = ptrtoint ptr %e to i64
+  %b_ptr = ptrtoint ptr %b to i64
   %sub = sub i64 %e_ptr, %b_ptr
-  %gep = getelementptr inbounds i8, i8* %b, i64 %sub
-  ret i8* %gep
+  %gep = getelementptr inbounds i8, ptr %b, i64 %sub
+  ret ptr %gep
 }
 
-define i64* @undef_inbounds_var_idx(i64 %idx) {
+define ptr @undef_inbounds_var_idx(i64 %idx) {
 ; CHECK-LABEL: @undef_inbounds_var_idx(
-; CHECK-NEXT:    ret i64* poison
+; CHECK-NEXT:    ret ptr poison
 ;
-  %el = getelementptr inbounds i64, i64* undef, i64 %idx
-  ret i64* %el
+  %el = getelementptr inbounds i64, ptr undef, i64 %idx
+  ret ptr %el
 }
 
-define i64* @undef_no_inbounds_var_idx(i64 %idx) {
+define ptr @undef_no_inbounds_var_idx(i64 %idx) {
 ; CHECK-LABEL: @undef_no_inbounds_var_idx(
-; CHECK-NEXT:    ret i64* undef
+; CHECK-NEXT:    ret ptr undef
 ;
-  %el = getelementptr i64, i64* undef, i64 %idx
-  ret i64* %el
+  %el = getelementptr i64, ptr undef, i64 %idx
+  ret ptr %el
 }
 
-define <8 x i64*> @undef_vec1() {
+define <8 x ptr> @undef_vec1() {
 ; CHECK-LABEL: @undef_vec1(
-; CHECK-NEXT:    ret <8 x i64*> poison
+; CHECK-NEXT:    ret <8 x ptr> poison
 ;
-  %el = getelementptr inbounds i64, i64* undef, <8 x i64> undef
-  ret <8 x i64*> %el
+  %el = getelementptr inbounds i64, ptr undef, <8 x i64> undef
+  ret <8 x ptr> %el
 }
 
-define <8 x i64*> @undef_vec2() {
+define <8 x ptr> @undef_vec2() {
 ; CHECK-LABEL: @undef_vec2(
-; CHECK-NEXT:    ret <8 x i64*> undef
+; CHECK-NEXT:    ret <8 x ptr> undef
 ;
-  %el = getelementptr i64, <8 x i64*> undef, <8 x i64> undef
-  ret <8 x i64*> %el
+  %el = getelementptr i64, <8 x ptr> undef, <8 x i64> undef
+  ret <8 x ptr> %el
 }
 
 ; Check ConstantExpr::getGetElementPtr() using ElementCount for size queries - begin.
 
 ; Constant ptr
 
-define i32* @ptr_idx_scalar() {
+define ptr @ptr_idx_scalar() {
 ; CHECK-LABEL: @ptr_idx_scalar(
-; CHECK-NEXT:    ret i32* inttoptr (i64 4 to i32*)
+; CHECK-NEXT:    ret ptr inttoptr (i64 4 to ptr)
 ;
-  %gep = getelementptr <4 x i32>, <4 x i32>* null, i64 0, i64 1
-  ret i32* %gep
+  %gep = getelementptr <4 x i32>, ptr null, i64 0, i64 1
+  ret ptr %gep
 }
 
-define <2 x i32*> @ptr_idx_vector() {
+define <2 x ptr> @ptr_idx_vector() {
 ; CHECK-LABEL: @ptr_idx_vector(
-; CHECK-NEXT:    ret <2 x i32*> getelementptr (i32, i32* null, <2 x i64> <i64 1, i64 1>)
+; CHECK-NEXT:    ret <2 x ptr> getelementptr (i32, ptr null, <2 x i64> <i64 1, i64 1>)
 ;
-  %gep = getelementptr i32, i32* null, <2 x i64> <i64 1, i64 1>
-  ret <2 x i32*> %gep
+  %gep = getelementptr i32, ptr null, <2 x i64> <i64 1, i64 1>
+  ret <2 x ptr> %gep
 }
 
-define <4 x i32*> @ptr_idx_mix_scalar_vector(){
+define <4 x ptr> @ptr_idx_mix_scalar_vector(){
 ; CHECK-LABEL: @ptr_idx_mix_scalar_vector(
-; CHECK-NEXT:    ret <4 x i32*> getelementptr ([42 x [3 x i32]], [42 x [3 x i32]]* null, <4 x i64> zeroinitializer, <4 x i64> <i64 0, i64 1, i64 2, i64 3>, <4 x i64> zeroinitializer)
+; CHECK-NEXT:    ret <4 x ptr> getelementptr ([42 x [3 x i32]], ptr null, <4 x i64> zeroinitializer, <4 x i64> <i64 0, i64 1, i64 2, i64 3>, <4 x i64> zeroinitializer)
 ;
-  %gep = getelementptr [42 x [3 x i32]], [42 x [3 x i32]]* null, i64 0, <4 x i64> <i64 0, i64 1, i64 2, i64 3>, i64 0
-  ret <4 x i32*> %gep
+  %gep = getelementptr [42 x [3 x i32]], ptr null, i64 0, <4 x i64> <i64 0, i64 1, i64 2, i64 3>, i64 0
+  ret <4 x ptr> %gep
 }
 
 ; Constant vector
 
-define <4 x i32*> @vector_idx_scalar() {
+define <4 x ptr> @vector_idx_scalar() {
 ; CHECK-LABEL: @vector_idx_scalar(
-; CHECK-NEXT:    ret <4 x i32*> getelementptr (i32, <4 x i32*> zeroinitializer, <4 x i64> <i64 1, i64 1, i64 1, i64 1>)
+; CHECK-NEXT:    ret <4 x ptr> getelementptr (i32, <4 x ptr> zeroinitializer, <4 x i64> <i64 1, i64 1, i64 1, i64 1>)
 ;
-  %gep = getelementptr i32, <4 x i32*> zeroinitializer, i64 1
-  ret <4 x i32*> %gep
+  %gep = getelementptr i32, <4 x ptr> zeroinitializer, i64 1
+  ret <4 x ptr> %gep
 }
 
-define <4 x i32*> @vector_idx_vector() {
+define <4 x ptr> @vector_idx_vector() {
 ; CHECK-LABEL: @vector_idx_vector(
-; CHECK-NEXT:    ret <4 x i32*> getelementptr (i32, <4 x i32*> zeroinitializer, <4 x i64> <i64 1, i64 1, i64 1, i64 1>)
+; CHECK-NEXT:    ret <4 x ptr> getelementptr (i32, <4 x ptr> zeroinitializer, <4 x i64> <i64 1, i64 1, i64 1, i64 1>)
 ;
-  %gep = getelementptr i32, <4 x i32*> zeroinitializer, <4 x i64> <i64 1, i64 1, i64 1, i64 1>
-  ret <4 x i32*> %gep
+  %gep = getelementptr i32, <4 x ptr> zeroinitializer, <4 x i64> <i64 1, i64 1, i64 1, i64 1>
+  ret <4 x ptr> %gep
 }
 
 %struct = type { double, float }
-define <4 x float*> @vector_idx_mix_scalar_vector() {
+define <4 x ptr> @vector_idx_mix_scalar_vector() {
 ; CHECK-LABEL: @vector_idx_mix_scalar_vector(
-; CHECK-NEXT:    ret <4 x float*> getelementptr ([[STRUCT:%.*]], <4 x %struct*> zeroinitializer, <4 x i64> zeroinitializer, i32 1)
+; CHECK-NEXT:    ret <4 x ptr> getelementptr ([[STRUCT:%.*]], <4 x ptr> zeroinitializer, <4 x i64> zeroinitializer, i32 1)
 ;
-  %gep = getelementptr %struct, <4 x %struct*> zeroinitializer, i32 0, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
-  ret <4 x float*> %gep
+  %gep = getelementptr %struct, <4 x ptr> zeroinitializer, i32 0, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  ret <4 x ptr> %gep
 }
 
 ; Constant scalable
 
-define <vscale x 4 x i32*> @scalable_idx_scalar() {
+define <vscale x 4 x ptr> @scalable_idx_scalar() {
 ; CHECK-LABEL: @scalable_idx_scalar(
-; CHECK-NEXT:    ret <vscale x 4 x i32*> getelementptr (i32, <vscale x 4 x i32*> zeroinitializer, <vscale x 4 x i64> shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer))
+; CHECK-NEXT:    ret <vscale x 4 x ptr> getelementptr (i32, <vscale x 4 x ptr> zeroinitializer, <vscale x 4 x i64> shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer))
 ;
-  %gep = getelementptr i32, <vscale x 4 x i32*> zeroinitializer, i64 1
-  ret <vscale x 4 x i32*> %gep
+  %gep = getelementptr i32, <vscale x 4 x ptr> zeroinitializer, i64 1
+  ret <vscale x 4 x ptr> %gep
 }
 
-define <vscale x 4 x float*> @scalable_vector_idx_mix_scalar_vector() {
+define <vscale x 4 x ptr> @scalable_vector_idx_mix_scalar_vector() {
 ; CHECK-LABEL: @scalable_vector_idx_mix_scalar_vector(
-; CHECK-NEXT:    ret <vscale x 4 x float*> getelementptr ([[STRUCT:%.*]], <vscale x 4 x %struct*> zeroinitializer, <vscale x 4 x i64> zeroinitializer, i32 1)
+; CHECK-NEXT:    ret <vscale x 4 x ptr> getelementptr ([[STRUCT:%.*]], <vscale x 4 x ptr> zeroinitializer, <vscale x 4 x i64> zeroinitializer, i32 1)
 ;
-  %gep = getelementptr %struct, <vscale x 4 x %struct*> zeroinitializer, i32 0, i32 1
-  ret <vscale x 4 x float*> %gep
+  %gep = getelementptr %struct, <vscale x 4 x ptr> zeroinitializer, i32 0, i32 1
+  ret <vscale x 4 x ptr> %gep
 }
 
-define <vscale x 2 x i64*> @ptr_idx_mix_scalar_scalable_vector() {
+define <vscale x 2 x ptr> @ptr_idx_mix_scalar_scalable_vector() {
 ; CHECK-LABEL: @ptr_idx_mix_scalar_scalable_vector(
-; CHECK-NEXT:    ret <vscale x 2 x i64*> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 2 x ptr> zeroinitializer
 ;
-  %v = getelementptr [2 x i64], [2 x i64]* null, i64 0, <vscale x 2 x i64> zeroinitializer
-  ret <vscale x 2 x i64*> %v
+  %v = getelementptr [2 x i64], ptr null, i64 0, <vscale x 2 x i64> zeroinitializer
+  ret <vscale x 2 x ptr> %v
 }
 
 ; Check ConstantExpr::getGetElementPtr() using ElementCount for size queries - end.
 
-define i8* @poison() {
+define ptr @poison() {
 ; CHECK-LABEL: @poison(
-; CHECK-NEXT:    ret i8* poison
+; CHECK-NEXT:    ret ptr poison
 ;
-  %v = getelementptr i8, i8* poison, i64 1
-  ret i8* %v
+  %v = getelementptr i8, ptr poison, i64 1
+  ret ptr %v
 }
 
-define i8* @poison2(i8* %baseptr) {
+define ptr @poison2(ptr %baseptr) {
 ; CHECK-LABEL: @poison2(
-; CHECK-NEXT:    ret i8* poison
+; CHECK-NEXT:    ret ptr poison
 ;
-  %v = getelementptr i8, i8* %baseptr, i64 poison
-  ret i8* %v
+  %v = getelementptr i8, ptr %baseptr, i64 poison
+  ret ptr %v
 }
 
-define i8* @D98611_1(i8* %c1, i64 %offset) {
+define ptr @D98611_1(ptr %c1, i64 %offset) {
 ; CHECK-LABEL: @D98611_1(
-; CHECK-NEXT:    [[C2:%.*]] = getelementptr inbounds i8, i8* [[C1:%.*]], i64 [[OFFSET:%.*]]
-; CHECK-NEXT:    ret i8* [[C2]]
+; CHECK-NEXT:    [[C2:%.*]] = getelementptr inbounds i8, ptr [[C1:%.*]], i64 [[OFFSET:%.*]]
+; CHECK-NEXT:    ret ptr [[C2]]
 ;
-  %c2 = getelementptr inbounds i8, i8* %c1, i64 %offset
-  %ptrtoint1 = ptrtoint i8* %c1 to i64
-  %ptrtoint2 = ptrtoint i8* %c2 to i64
+  %c2 = getelementptr inbounds i8, ptr %c1, i64 %offset
+  %ptrtoint1 = ptrtoint ptr %c1 to i64
+  %ptrtoint2 = ptrtoint ptr %c2 to i64
   %sub = sub i64 %ptrtoint2, %ptrtoint1
-  %gep = getelementptr inbounds i8, i8* %c1, i64 %sub
-  ret i8* %gep
+  %gep = getelementptr inbounds i8, ptr %c1, i64 %sub
+  ret ptr %gep
 }
 
-define %struct.A* @D98611_2(%struct.A* %c1, i64 %offset) {
+define ptr @D98611_2(ptr %c1, i64 %offset) {
 ; CHECK-LABEL: @D98611_2(
-; CHECK-NEXT:    [[C2:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], %struct.A* [[C1:%.*]], i64 [[OFFSET:%.*]]
-; CHECK-NEXT:    ret %struct.A* [[C2]]
+; CHECK-NEXT:    [[C2:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], ptr [[C1:%.*]], i64 [[OFFSET:%.*]]
+; CHECK-NEXT:    ret ptr [[C2]]
 ;
-  %c2 = getelementptr inbounds %struct.A, %struct.A* %c1, i64 %offset
-  %ptrtoint1 = ptrtoint %struct.A* %c1 to i64
-  %ptrtoint2 = ptrtoint %struct.A* %c2 to i64
+  %c2 = getelementptr inbounds %struct.A, ptr %c1, i64 %offset
+  %ptrtoint1 = ptrtoint ptr %c1 to i64
+  %ptrtoint2 = ptrtoint ptr %c2 to i64
   %sub = sub i64 %ptrtoint2, %ptrtoint1
   %sdiv = sdiv exact i64 %sub, 7
-  %gep = getelementptr inbounds %struct.A, %struct.A* %c1, i64 %sdiv
-  ret %struct.A* %gep
+  %gep = getelementptr inbounds %struct.A, ptr %c1, i64 %sdiv
+  ret ptr %gep
 }
 
-define i32* @D98611_3(i32* %c1, i64 %offset) {
+define ptr @D98611_3(ptr %c1, i64 %offset) {
 ; CHECK-LABEL: @D98611_3(
-; CHECK-NEXT:    [[C2:%.*]] = getelementptr inbounds i32, i32* [[C1:%.*]], i64 [[OFFSET:%.*]]
-; CHECK-NEXT:    ret i32* [[C2]]
+; CHECK-NEXT:    [[C2:%.*]] = getelementptr inbounds i32, ptr [[C1:%.*]], i64 [[OFFSET:%.*]]
+; CHECK-NEXT:    ret ptr [[C2]]
 ;
-  %c2 = getelementptr inbounds i32, i32* %c1, i64 %offset
-  %ptrtoint1 = ptrtoint i32* %c1 to i64
-  %ptrtoint2 = ptrtoint i32* %c2 to i64
+  %c2 = getelementptr inbounds i32, ptr %c1, i64 %offset
+  %ptrtoint1 = ptrtoint ptr %c1 to i64
+  %ptrtoint2 = ptrtoint ptr %c2 to i64
   %sub = sub i64 %ptrtoint2, %ptrtoint1
   %ashr = ashr exact i64 %sub, 2
-  %gep = getelementptr inbounds i32, i32* %c1, i64 %ashr
-  ret i32* %gep
+  %gep = getelementptr inbounds i32, ptr %c1, i64 %ashr
+  ret ptr %gep
 }
 
-define <8 x i32*> @gep_vector_index_op2_poison([144 x i32]* %ptr) {
+define <8 x ptr> @gep_vector_index_op2_poison(ptr %ptr) {
 ; CHECK-LABEL: @gep_vector_index_op2_poison(
-; CHECK-NEXT:    ret <8 x i32*> poison
+; CHECK-NEXT:    ret <8 x ptr> poison
 ;
-  %res = getelementptr inbounds [144 x i32], [144 x i32]* %ptr, i64 0, <8 x i64> poison
-  ret <8 x i32*> %res
+  %res = getelementptr inbounds [144 x i32], ptr %ptr, i64 0, <8 x i64> poison
+  ret <8 x ptr> %res
 }
 
 %t.1 = type { i32, [144 x i32] }
 
-define <8 x i32*> @gep_vector_index_op3_poison(%t.1* %ptr) {
+define <8 x ptr> @gep_vector_index_op3_poison(ptr %ptr) {
 ; CHECK-LABEL: @gep_vector_index_op3_poison(
-; CHECK-NEXT:    ret <8 x i32*> poison
+; CHECK-NEXT:    ret <8 x ptr> poison
 ;
-  %res = getelementptr inbounds %t.1, %t.1* %ptr, i64 0, i32 1, <8 x i64> poison
-  ret <8 x i32*> %res
+  %res = getelementptr inbounds %t.1, ptr %ptr, i64 0, i32 1, <8 x i64> poison
+  ret <8 x ptr> %res
 }
 
 %t.2 = type { i32, i32 }
 %t.3 = type { i32, [144 x %t.2 ] }
 
-define <8 x i32*> @gep_vector_index_op3_poison_constant_index_afterwards(%t.3* %ptr) {
+define <8 x ptr> @gep_vector_index_op3_poison_constant_index_afterwards(ptr %ptr) {
 ; CHECK-LABEL: @gep_vector_index_op3_poison_constant_index_afterwards(
-; CHECK-NEXT:    ret <8 x i32*> poison
+; CHECK-NEXT:    ret <8 x ptr> poison
 ;
-  %res = getelementptr inbounds %t.3, %t.3* %ptr, i64 0, i32 1, <8 x i64> poison, i32 1
-  ret <8 x i32*> %res
+  %res = getelementptr inbounds %t.3, ptr %ptr, i64 0, i32 1, <8 x i64> poison, i32 1
+  ret <8 x ptr> %res
 }

diff  --git a/llvm/test/Transforms/InstSimplify/icmp.ll b/llvm/test/Transforms/InstSimplify/icmp.ll
index eaec21c41fc57..88f4d42c74039 100644
--- a/llvm/test/Transforms/InstSimplify/icmp.ll
+++ b/llvm/test/Transforms/InstSimplify/icmp.ll
@@ -3,19 +3,19 @@
 
 target datalayout = "e-p:64:64:64-p1:16:16:16-p2:32:32:32-p3:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 
-declare void @usei8ptr(i8* %ptr)
+declare void @usei8ptr(ptr %ptr)
 
 ; Ensure that we do not crash when looking at such a weird bitcast.
-define i1 @bitcast_from_single_element_pointer_vector_to_pointer(<1 x i8*> %ptr1vec, i8* %ptr2) {
+define i1 @bitcast_from_single_element_pointer_vector_to_pointer(<1 x ptr> %ptr1vec, ptr %ptr2) {
 ; CHECK-LABEL: @bitcast_from_single_element_pointer_vector_to_pointer(
-; CHECK-NEXT:    [[PTR1:%.*]] = bitcast <1 x i8*> [[PTR1VEC:%.*]] to i8*
-; CHECK-NEXT:    call void @usei8ptr(i8* [[PTR1]])
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[PTR1]], [[PTR2:%.*]]
+; CHECK-NEXT:    [[PTR1:%.*]] = bitcast <1 x ptr> [[PTR1VEC:%.*]] to ptr
+; CHECK-NEXT:    call void @usei8ptr(ptr [[PTR1]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[PTR1]], [[PTR2:%.*]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %ptr1 = bitcast <1 x i8*> %ptr1vec to i8*
-  call void @usei8ptr(i8* %ptr1)
-  %cmp = icmp eq i8* %ptr1, %ptr2
+  %ptr1 = bitcast <1 x ptr> %ptr1vec to ptr
+  call void @usei8ptr(ptr %ptr1)
+  %cmp = icmp eq ptr %ptr1, %ptr2
   ret i1 %cmp
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/insertelement.ll b/llvm/test/Transforms/InstSimplify/insertelement.ll
index 25697d77e197a..55fab36ddca15 100644
--- a/llvm/test/Transforms/InstSimplify/insertelement.ll
+++ b/llvm/test/Transforms/InstSimplify/insertelement.ll
@@ -106,7 +106,7 @@ define void @PR43218() {
 ; CHECK-NEXT:    ret void
 ; CHECK:       unreachable_infloop:
 ; CHECK-NEXT:    [[EXTRACT:%.*]] = extractelement <2 x i64> [[BOGUS:%.*]], i32 0
-; CHECK-NEXT:    [[T0:%.*]] = inttoptr i64 [[EXTRACT]] to i16****
+; CHECK-NEXT:    [[T0:%.*]] = inttoptr i64 [[EXTRACT]] to ptr
 ; CHECK-NEXT:    [[BOGUS]] = insertelement <2 x i64> [[BOGUS]], i64 undef, i32 1
 ; CHECK-NEXT:    br label [[UNREACHABLE_INFLOOP:%.*]]
 ;
@@ -115,7 +115,7 @@ end:
 
 unreachable_infloop:
   %extract = extractelement <2 x i64> %bogus, i32 0
-  %t0 = inttoptr i64 %extract to i16****
+  %t0 = inttoptr i64 %extract to ptr
   %bogus = insertelement <2 x i64> %bogus, i64 undef, i32 1
   br label %unreachable_infloop
 }

diff  --git a/llvm/test/Transforms/InstSimplify/invalid-load-operand-infinite-loop.ll b/llvm/test/Transforms/InstSimplify/invalid-load-operand-infinite-loop.ll
index 6a4fe26861177..06323267e196b 100644
--- a/llvm/test/Transforms/InstSimplify/invalid-load-operand-infinite-loop.ll
+++ b/llvm/test/Transforms/InstSimplify/invalid-load-operand-infinite-loop.ll
@@ -3,7 +3,7 @@
 
 %struct.wobble = type { i8 }
 
-define i32 @main() local_unnamed_addr personality i8* undef {
+define i32 @main() local_unnamed_addr personality ptr undef {
 bb12:
   br i1 false, label %bb13, label %bb28
 
@@ -11,7 +11,7 @@ bb13:                                             ; preds = %bb12
   br label %bb14
 
 bb14:                                             ; preds = %bb26, %bb13
-  %tmp15 = phi i8* [ %tmp27, %bb26 ], [ undef, %bb13 ]
+  %tmp15 = phi ptr [ %tmp27, %bb26 ], [ undef, %bb13 ]
   %tmp16 = icmp slt i32 5, undef
   %tmp17 = select i1 false, i1 true, i1 %tmp16
   br label %bb18
@@ -24,7 +24,7 @@ bb19:                                             ; preds = %bb18
   br label %bb21
 
 bb21:                                             ; preds = %bb19, %bb18
-  %tmp22 = load i8, i8* %tmp15, align 1
+  %tmp22 = load i8, ptr %tmp15, align 1
   br label %bb23
 
 bb23:                                             ; preds = %bb21
@@ -34,18 +34,18 @@ bb24:                                             ; preds = %bb23
   br label %bb25
 
 bb25:                                             ; preds = %bb24, %bb23
-  invoke void undef(%struct.wobble* undef, i32 0, i32 undef, i8 %tmp22)
+  invoke void undef(ptr undef, i32 0, i32 undef, i8 %tmp22)
           to label %bb26 unwind label %bb33
 
 bb26:                                             ; preds = %bb25
-  %tmp27 = getelementptr inbounds i8, i8* %tmp15, i64 1
+  %tmp27 = getelementptr inbounds i8, ptr %tmp15, i64 1
   br label %bb14
 
 bb28:                                             ; preds = %bb12
   unreachable
 
 bb33:                                             ; preds = %bb25
-  %tmp34 = landingpad { i8*, i32 }
+  %tmp34 = landingpad { ptr, i32 }
           cleanup
   unreachable
 }

diff  --git a/llvm/test/Transforms/InstSimplify/invariant.group-load.ll b/llvm/test/Transforms/InstSimplify/invariant.group-load.ll
index f1ee1528e8acf..661b216e9474d 100644
--- a/llvm/test/Transforms/InstSimplify/invariant.group-load.ll
+++ b/llvm/test/Transforms/InstSimplify/invariant.group-load.ll
@@ -4,18 +4,16 @@
 @A = linkonce_odr hidden constant { i64, i64 } { i64 2, i64 3 }
 @B = linkonce_odr hidden global { i64, i64 } { i64 2, i64 3 }
 
-declare i8* @llvm.strip.invariant.group.p0i8(i8* %p)
-declare i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+declare ptr @llvm.strip.invariant.group.p0(ptr %p)
+declare ptr @llvm.launder.invariant.group.p0(ptr %p)
 
 define i64 @f() {
 ; CHECK-LABEL: @f(
 ; CHECK-NEXT:    ret i64 3
 ;
-  %p = bitcast { i64, i64 }* @A to i8*
-  %a = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
-  %b = getelementptr i8, i8* %a, i32 8
-  %c = bitcast i8* %b to i64*
-  %d = load i64, i64* %c
+  %a = call ptr @llvm.strip.invariant.group.p0(ptr @A)
+  %b = getelementptr i8, ptr %a, i32 8
+  %d = load i64, ptr %b
   ret i64 %d
 }
 
@@ -23,58 +21,47 @@ define i64 @g() {
 ; CHECK-LABEL: @g(
 ; CHECK-NEXT:    ret i64 3
 ;
-  %p = bitcast { i64, i64 }* @A to i8*
-  %a = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
-  %b = getelementptr i8, i8* %a, i32 8
-  %c = bitcast i8* %b to i64*
-  %d = load i64, i64* %c
+  %a = call ptr @llvm.launder.invariant.group.p0(ptr @A)
+  %b = getelementptr i8, ptr %a, i32 8
+  %d = load i64, ptr %b
   ret i64 %d
 }
 
 define i64 @notconstantglobal() {
 ; CHECK-LABEL: @notconstantglobal(
-; CHECK-NEXT:    [[A:%.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* bitcast ({ i64, i64 }* @B to i8*))
-; CHECK-NEXT:    [[B:%.*]] = getelementptr i8, i8* [[A]], i32 8
-; CHECK-NEXT:    [[C:%.*]] = bitcast i8* [[B]] to i64*
-; CHECK-NEXT:    [[D:%.*]] = load i64, i64* [[C]], align 4
+; CHECK-NEXT:    [[A:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr @B)
+; CHECK-NEXT:    [[B:%.*]] = getelementptr i8, ptr [[A]], i32 8
+; CHECK-NEXT:    [[D:%.*]] = load i64, ptr [[B]], align 4
 ; CHECK-NEXT:    ret i64 [[D]]
 ;
-  %p = bitcast { i64, i64 }* @B to i8*
-  %a = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
-  %b = getelementptr i8, i8* %a, i32 8
-  %c = bitcast i8* %b to i64*
-  %d = load i64, i64* %c
+  %a = call ptr @llvm.launder.invariant.group.p0(ptr @B)
+  %b = getelementptr i8, ptr %a, i32 8
+  %d = load i64, ptr %b
   ret i64 %d
 }
 
 define i64 @notconstantgepindex(i32 %i) {
 ; CHECK-LABEL: @notconstantgepindex(
-; CHECK-NEXT:    [[A:%.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* bitcast ({ i64, i64 }* @A to i8*))
-; CHECK-NEXT:    [[B:%.*]] = getelementptr i8, i8* [[A]], i32 [[I:%.*]]
-; CHECK-NEXT:    [[C:%.*]] = bitcast i8* [[B]] to i64*
-; CHECK-NEXT:    [[D:%.*]] = load i64, i64* [[C]], align 4
+; CHECK-NEXT:    [[A:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr @A)
+; CHECK-NEXT:    [[B:%.*]] = getelementptr i8, ptr [[A]], i32 [[I:%.*]]
+; CHECK-NEXT:    [[D:%.*]] = load i64, ptr [[B]], align 4
 ; CHECK-NEXT:    ret i64 [[D]]
 ;
-  %p = bitcast { i64, i64 }* @A to i8*
-  %a = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
-  %b = getelementptr i8, i8* %a, i32 %i
-  %c = bitcast i8* %b to i64*
-  %d = load i64, i64* %c
+  %a = call ptr @llvm.launder.invariant.group.p0(ptr @A)
+  %b = getelementptr i8, ptr %a, i32 %i
+  %d = load i64, ptr %b
   ret i64 %d
 }
 
 define i64 @volatile() {
 ; CHECK-LABEL: @volatile(
-; CHECK-NEXT:    [[A:%.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* bitcast ({ i64, i64 }* @A to i8*))
-; CHECK-NEXT:    [[B:%.*]] = getelementptr i8, i8* [[A]], i32 8
-; CHECK-NEXT:    [[C:%.*]] = bitcast i8* [[B]] to i64*
-; CHECK-NEXT:    [[D:%.*]] = load volatile i64, i64* [[C]], align 4
+; CHECK-NEXT:    [[A:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr @A)
+; CHECK-NEXT:    [[B:%.*]] = getelementptr i8, ptr [[A]], i32 8
+; CHECK-NEXT:    [[D:%.*]] = load volatile i64, ptr [[B]], align 4
 ; CHECK-NEXT:    ret i64 [[D]]
 ;
-  %p = bitcast { i64, i64 }* @A to i8*
-  %a = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
-  %b = getelementptr i8, i8* %a, i32 8
-  %c = bitcast i8* %b to i64*
-  %d = load volatile i64, i64* %c
+  %a = call ptr @llvm.launder.invariant.group.p0(ptr @A)
+  %b = getelementptr i8, ptr %a, i32 8
+  %d = load volatile i64, ptr %b
   ret i64 %d
 }

diff  --git a/llvm/test/Transforms/InstSimplify/known-non-zero.ll b/llvm/test/Transforms/InstSimplify/known-non-zero.ll
index 83ffba916e669..b647f11af4461 100644
--- a/llvm/test/Transforms/InstSimplify/known-non-zero.ll
+++ b/llvm/test/Transforms/InstSimplify/known-non-zero.ll
@@ -103,7 +103,7 @@ exit:
 ; The code below exposed a bug similar to the one exposed by D60846, see the commit 6ea477590085.
 ; In a nutshell, we should not replace %result.0 with 0 here.
 
-define zeroext i8 @update_phi_query_loc_in_recursive_call(i8* nocapture readonly %p){
+define zeroext i8 @update_phi_query_loc_in_recursive_call(ptr nocapture readonly %p){
 ; CHECK-LABEL: @update_phi_query_loc_in_recursive_call(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[FOR_COND:%.*]]
@@ -115,7 +115,7 @@ define zeroext i8 @update_phi_query_loc_in_recursive_call(i8* nocapture readonly
 ; CHECK:       for.cond.cleanup:
 ; CHECK-NEXT:    ret i8 [[RESULT_0]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i8, i8* [[P:%.*]], align 1
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[P:%.*]], align 1
 ; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[TMP0]] to i32
 ; CHECK-NEXT:    [[MUL:%.*]] = shl nuw nsw i32 [[SHIFT_0]], 3
 ; CHECK-NEXT:    [[SHL:%.*]] = shl nuw nsw i32 [[CONV]], [[MUL]]
@@ -136,7 +136,7 @@ for.cond.cleanup:                                 ; preds = %for.cond
   ret i8 %result.0
 
 for.body:                                         ; preds = %for.cond
-  %0 = load i8, i8* %p, align 1
+  %0 = load i8, ptr %p, align 1
   %conv = zext i8 %0 to i32
   %mul = shl nuw nsw i32 %shift.0, 3
   %shl = shl nuw nsw i32 %conv, %mul

diff  --git a/llvm/test/Transforms/InstSimplify/load-relative-32.ll b/llvm/test/Transforms/InstSimplify/load-relative-32.ll
index db6ed6ef566c3..236341f0a9797 100644
--- a/llvm/test/Transforms/InstSimplify/load-relative-32.ll
+++ b/llvm/test/Transforms/InstSimplify/load-relative-32.ll
@@ -6,14 +6,14 @@ target triple = "i386-unknown-linux-gnu"
 @a = external global i8
 
 @c1 = constant [3 x i32] [i32 0, i32 0,
-i32 sub (i32 ptrtoint (i8* @a to i32), i32 ptrtoint (i32* getelementptr ([3 x i32], [3 x i32]* @c1, i32 0, i32 2) to i32))
+i32 sub (i32 ptrtoint (ptr @a to i32), i32 ptrtoint (ptr getelementptr ([3 x i32], ptr @c1, i32 0, i32 2) to i32))
 ]
 
 ; CHECK: @f1
-define i8* @f1() {
-  ; CHECK: ret i8* @a
-  %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* getelementptr ([3 x i32], [3 x i32]* @c1, i32 0, i32 2) to i8*), i32 0)
-  ret i8* %l
+define ptr @f1() {
+  ; CHECK: ret ptr @a
+  %l = call ptr @llvm.load.relative.i32(ptr getelementptr ([3 x i32], ptr @c1, i32 0, i32 2), i32 0)
+  ret ptr %l
 }
 
-declare i8* @llvm.load.relative.i32(i8*, i32)
+declare ptr @llvm.load.relative.i32(ptr, i32)

diff  --git a/llvm/test/Transforms/InstSimplify/load-relative.ll b/llvm/test/Transforms/InstSimplify/load-relative.ll
index 9728359434f43..75d38d555d329 100644
--- a/llvm/test/Transforms/InstSimplify/load-relative.ll
+++ b/llvm/test/Transforms/InstSimplify/load-relative.ll
@@ -6,70 +6,70 @@ target triple = "x86_64-unknown-linux-gnu"
 @a = external global i8
 @b = external global i8
 
- at c1 = constant i32 trunc (i64 sub (i64 ptrtoint (i8* @a to i64), i64 ptrtoint (i32* @c1 to i64)) to i32)
+ at c1 = constant i32 trunc (i64 sub (i64 ptrtoint (ptr @a to i64), i64 ptrtoint (ptr @c1 to i64)) to i32)
 @c2 = constant [7 x i32] [i32 0, i32 0,
-i32 trunc (i64 sub (i64 ptrtoint (i8* @a to i64), i64 ptrtoint (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i32 0, i32 2) to i64)) to i32),
-i32 trunc (i64 sub (i64 ptrtoint (i8* @b to i64), i64 ptrtoint (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i32 0, i32 2) to i64)) to i32),
-i32 trunc (i64 add (i64 ptrtoint (i8* @b to i64), i64 ptrtoint (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i32 0, i32 2) to i64)) to i32),
-i32 trunc (i64 sub (i64 ptrtoint (i8* @b to i64), i64 1) to i32),
-i32 trunc (i64 sub (i64 0, i64 ptrtoint (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i32 0, i32 2) to i64)) to i32)
+i32 trunc (i64 sub (i64 ptrtoint (ptr @a to i64), i64 ptrtoint (ptr getelementptr ([7 x i32], ptr @c2, i32 0, i32 2) to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr @b to i64), i64 ptrtoint (ptr getelementptr ([7 x i32], ptr @c2, i32 0, i32 2) to i64)) to i32),
+i32 trunc (i64 add (i64 ptrtoint (ptr @b to i64), i64 ptrtoint (ptr getelementptr ([7 x i32], ptr @c2, i32 0, i32 2) to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr @b to i64), i64 1) to i32),
+i32 trunc (i64 sub (i64 0, i64 ptrtoint (ptr getelementptr ([7 x i32], ptr @c2, i32 0, i32 2) to i64)) to i32)
 ]
 
 ; CHECK: @f1
-define i8* @f1() {
-  ; CHECK: ret i8* @a
-  %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* @c1 to i8*), i32 0)
-  ret i8* %l
+define ptr @f1() {
+  ; CHECK: ret ptr @a
+  %l = call ptr @llvm.load.relative.i32(ptr @c1, i32 0)
+  ret ptr %l
 }
 
 ; CHECK: @f2
-define i8* @f2() {
-  ; CHECK: ret i8* @a
-  %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i64 0, i64 2) to i8*), i32 0)
-  ret i8* %l
+define ptr @f2() {
+  ; CHECK: ret ptr @a
+  %l = call ptr @llvm.load.relative.i32(ptr getelementptr ([7 x i32], ptr @c2, i64 0, i64 2), i32 0)
+  ret ptr %l
 }
 
 ; CHECK: @f3
-define i8* @f3() {
-  ; CHECK: ret i8* @b
-  %l = call i8* @llvm.load.relative.i64(i8* bitcast (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i64 0, i64 2) to i8*), i64 4)
-  ret i8* %l
+define ptr @f3() {
+  ; CHECK: ret ptr @b
+  %l = call ptr @llvm.load.relative.i64(ptr getelementptr ([7 x i32], ptr @c2, i64 0, i64 2), i64 4)
+  ret ptr %l
 }
 
 ; CHECK: @f4
-define i8* @f4() {
-  ; CHECK: ret i8* %
-  %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i64 0, i64 2) to i8*), i32 1)
-  ret i8* %l
+define ptr @f4() {
+  ; CHECK: ret ptr %
+  %l = call ptr @llvm.load.relative.i32(ptr getelementptr ([7 x i32], ptr @c2, i64 0, i64 2), i32 1)
+  ret ptr %l
 }
 
 ; CHECK: @f5
-define i8* @f5() {
-  ; CHECK: ret i8* %
-  %l = call i8* @llvm.load.relative.i32(i8* zeroinitializer, i32 0)
-  ret i8* %l
+define ptr @f5() {
+  ; CHECK: ret ptr %
+  %l = call ptr @llvm.load.relative.i32(ptr zeroinitializer, i32 0)
+  ret ptr %l
 }
 
 ; CHECK: @f6
-define i8* @f6() {
-  ; CHECK: ret i8* %
-  %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i64 0, i64 2) to i8*), i32 8)
-  ret i8* %l
+define ptr @f6() {
+  ; CHECK: ret ptr %
+  %l = call ptr @llvm.load.relative.i32(ptr getelementptr ([7 x i32], ptr @c2, i64 0, i64 2), i32 8)
+  ret ptr %l
 }
 
 ; CHECK: @f7
-define i8* @f7() {
-  ; CHECK: ret i8* %
-  %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i64 0, i64 2) to i8*), i32 12)
-  ret i8* %l
+define ptr @f7() {
+  ; CHECK: ret ptr %
+  %l = call ptr @llvm.load.relative.i32(ptr getelementptr ([7 x i32], ptr @c2, i64 0, i64 2), i32 12)
+  ret ptr %l
 }
 
 ; CHECK: @f8
-define i8* @f8() {
-  ; CHECK: ret i8* %
-  %l = call i8* @llvm.load.relative.i32(i8* bitcast (i32* getelementptr ([7 x i32], [7 x i32]* @c2, i64 0, i64 2) to i8*), i32 16)
-  ret i8* %l
+define ptr @f8() {
+  ; CHECK: ret ptr %
+  %l = call ptr @llvm.load.relative.i32(ptr getelementptr ([7 x i32], ptr @c2, i64 0, i64 2), i32 16)
+  ret ptr %l
 }
 
-declare i8* @llvm.load.relative.i32(i8*, i32)
-declare i8* @llvm.load.relative.i64(i8*, i64)
+declare ptr @llvm.load.relative.i32(ptr, i32)
+declare ptr @llvm.load.relative.i64(ptr, i64)

diff  --git a/llvm/test/Transforms/InstSimplify/load.ll b/llvm/test/Transforms/InstSimplify/load.ll
index 025051c03a3e2..8916b33fac4a1 100644
--- a/llvm/test/Transforms/InstSimplify/load.ll
+++ b/llvm/test/Transforms/InstSimplify/load.ll
@@ -8,7 +8,7 @@ define i32 @crash_on_zeroinit() {
 ; CHECK-LABEL: @crash_on_zeroinit(
 ; CHECK-NEXT:    ret i32 undef
 ;
-  %load = load i32, i32* bitcast ({}* @zeroinit to i32*)
+  %load = load i32, ptr @zeroinit
   ret i32 %load
 }
 
@@ -16,7 +16,7 @@ define i32 @crash_on_undef() {
 ; CHECK-LABEL: @crash_on_undef(
 ; CHECK-NEXT:    ret i32 undef
 ;
-  %load = load i32, i32* bitcast ({}* @undef to i32*)
+  %load = load i32, ptr @undef
   ret i32 %load
 }
 
@@ -26,7 +26,7 @@ define <8 x i32> @partial_load() {
 ; CHECK-LABEL: @partial_load(
 ; CHECK-NEXT:    ret <8 x i32> <i32 0, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48>
 ;
-  %load = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr ([8 x i32], [8 x i32]* @GV, i64 0, i64 -1) to <8 x i32>*)
+  %load = load <8 x i32>, ptr getelementptr ([8 x i32], ptr @GV, i64 0, i64 -1)
   ret <8 x i32> %load
 }
 
@@ -37,6 +37,6 @@ define <3 x float> @load_vec3() {
 ; CHECK-LABEL: @load_vec3(
 ; CHECK-NEXT:    ret <3 x float> undef
 ;
-  %1 = load <3 x float>, <3 x float>* getelementptr inbounds (<3 x float>, <3 x float>* @constvec, i64 1)
+  %1 = load <3 x float>, ptr getelementptr inbounds (<3 x float>, ptr @constvec, i64 1)
   ret <3 x float> %1
 }

diff  --git a/llvm/test/Transforms/InstSimplify/maxmin_intrinsics.ll b/llvm/test/Transforms/InstSimplify/maxmin_intrinsics.ll
index 4c9c2af01f5c2..32ef90a0aec12 100644
--- a/llvm/test/Transforms/InstSimplify/maxmin_intrinsics.ll
+++ b/llvm/test/Transforms/InstSimplify/maxmin_intrinsics.ll
@@ -2137,13 +2137,13 @@ define i8 @umax_add_nuw_2(i8 %x) {
   ret i8 %max
 }
 
-define i8 @umax_range_metadata(i8* %p1, i8* %p2) {
+define i8 @umax_range_metadata(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: @umax_range_metadata(
-; CHECK-NEXT:    [[Y:%.*]] = load i8, i8* [[P2:%.*]], align 1, !range [[RNG0:![0-9]+]]
+; CHECK-NEXT:    [[Y:%.*]] = load i8, ptr [[P2:%.*]], align 1, !range [[RNG0:![0-9]+]]
 ; CHECK-NEXT:    ret i8 [[Y]]
 ;
-  %x = load i8, i8* %p1, !range !{i8 0, i8 10}
-  %y = load i8, i8* %p2, !range !{i8 20, i8 30}
+  %x = load i8, ptr %p1, !range !{i8 0, i8 10}
+  %y = load i8, ptr %p2, !range !{i8 20, i8 30}
   %max = call i8 @llvm.umax.i8(i8 %x, i8 %y)
   ret i8 %max
 }

diff  --git a/llvm/test/Transforms/InstSimplify/noalias-ptr.ll b/llvm/test/Transforms/InstSimplify/noalias-ptr.ll
index 1e82c2f01ac31..bed511ea4d9ff 100644
--- a/llvm/test/Transforms/InstSimplify/noalias-ptr.ll
+++ b/llvm/test/Transforms/InstSimplify/noalias-ptr.ll
@@ -25,25 +25,22 @@ target triple = "x86_64-unknown-linux-gnu"
 define void @_Z2p1v() {
 ; CHECK-LABEL: @_Z2p1v(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[I1:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT:    [[I1:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2:[0-9]+]]
 ; CHECK-NEXT:    br i1 false, label [[BB6:%.*]], label [[BB5:%.*]]
 ; CHECK:       bb5:
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I1]]) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I1]]) #[[ATTR3:[0-9]+]]
 ; CHECK-NEXT:    br label [[BB6]]
 ; CHECK:       bb6:
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %mStackData = alloca [10 x i32], align 16
-  %i = bitcast [10 x i32]* %mStackData to i8*
-  %i1 = tail call noalias i8* @_Znam(i64 48) #3
-  %i2 = bitcast i8* %i1 to i32*
-  %i3 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
-  %i4 = icmp eq i32* %i2, %i3
+  %i1 = tail call noalias ptr @_Znam(i64 48) #3
+  %i4 = icmp eq ptr %i1, %mStackData
   br i1 %i4, label %bb6, label %bb5
 
 bb5:                                              ; preds = %bb
-  call void @_ZdaPv(i8* %i1) #4
+  call void @_ZdaPv(ptr %i1) #4
   br label %bb6
 
 bb6:                                              ; preds = %bb5, %bb
@@ -53,31 +50,28 @@ bb6:                                              ; preds = %bb5, %bb
 define void @_Z2p2bb(i1 zeroext %b1, i1 zeroext %b2) {
 ; CHECK-LABEL: @_Z2p2bb(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[I3:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
-; CHECK-NEXT:    [[I4:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[I3:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[I4:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
 ; CHECK-NEXT:    br i1 false, label [[BB8:%.*]], label [[BB7:%.*]]
 ; CHECK:       bb7:
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I3]]) #[[ATTR3]]
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I4]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I3]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I4]]) #[[ATTR3]]
 ; CHECK-NEXT:    br label [[BB8]]
 ; CHECK:       bb8:
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %mStackData = alloca [10 x i32], align 16
-  %i = bitcast [10 x i32]* %mStackData to i8*
-  %i1 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
-  %i2 = select i1 %b1, i32* %i1, i32* @g2
-  %i3 = tail call noalias i8* @_Znam(i64 48) #3
-  %i4 = tail call noalias i8* @_Znam(i64 48) #3
-  %.v = select i1 %b2, i8* %i3, i8* %i4
-  %i5 = bitcast i8* %.v to i32*
-  %i6 = icmp eq i32* %i5, %i2
+  %i2 = select i1 %b1, ptr %mStackData, ptr @g2
+  %i3 = tail call noalias ptr @_Znam(i64 48) #3
+  %i4 = tail call noalias ptr @_Znam(i64 48) #3
+  %.v = select i1 %b2, ptr %i3, ptr %i4
+  %i6 = icmp eq ptr %.v, %i2
   br i1 %i6, label %bb8, label %bb7
 
 bb7:                                              ; preds = %bb
-  call void @_ZdaPv(i8* %i3) #4
-  call void @_ZdaPv(i8* %i4) #4
+  call void @_ZdaPv(ptr %i3) #4
+  call void @_ZdaPv(ptr %i4) #4
   br label %bb8
 
 bb8:                                              ; preds = %bb7, %bb
@@ -87,31 +81,28 @@ bb8:                                              ; preds = %bb7, %bb
 define void @_Z2p4bb(i1 zeroext %b1, i1 zeroext %b2) {
 ; CHECK-LABEL: @_Z2p4bb(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[I3:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
-; CHECK-NEXT:    [[I4:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[I3:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[I4:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
 ; CHECK-NEXT:    br i1 false, label [[BB8:%.*]], label [[BB7:%.*]]
 ; CHECK:       bb7:
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I3]]) #[[ATTR3]]
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I4]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I3]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I4]]) #[[ATTR3]]
 ; CHECK-NEXT:    br label [[BB8]]
 ; CHECK:       bb8:
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %mStackData = alloca [10 x i32], align 16
-  %i = bitcast [10 x i32]* %mStackData to i8*
-  %i1 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
-  %i2 = select i1 %b1, i32* %i1, i32* @g3
-  %i3 = tail call noalias i8* @_Znam(i64 48) #3
-  %i4 = tail call noalias i8* @_Znam(i64 48) #3
-  %.v = select i1 %b2, i8* %i3, i8* %i4
-  %i5 = bitcast i8* %.v to i32*
-  %i6 = icmp eq i32* %i5, %i2
+  %i2 = select i1 %b1, ptr %mStackData, ptr @g3
+  %i3 = tail call noalias ptr @_Znam(i64 48) #3
+  %i4 = tail call noalias ptr @_Znam(i64 48) #3
+  %.v = select i1 %b2, ptr %i3, ptr %i4
+  %i6 = icmp eq ptr %.v, %i2
   br i1 %i6, label %bb8, label %bb7
 
 bb7:                                              ; preds = %bb
-  call void @_ZdaPv(i8* %i3) #4
-  call void @_ZdaPv(i8* %i4) #4
+  call void @_ZdaPv(ptr %i3) #4
+  call void @_ZdaPv(ptr %i4) #4
   br label %bb8
 
 bb8:                                              ; preds = %bb7, %bb
@@ -121,31 +112,28 @@ bb8:                                              ; preds = %bb7, %bb
 define void @_Z2p5bb(i1 zeroext %b1, i1 zeroext %b2) {
 ; CHECK-LABEL: @_Z2p5bb(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[I3:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
-; CHECK-NEXT:    [[I4:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[I3:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[I4:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
 ; CHECK-NEXT:    br i1 false, label [[BB8:%.*]], label [[BB7:%.*]]
 ; CHECK:       bb7:
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I3]]) #[[ATTR3]]
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I4]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I3]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I4]]) #[[ATTR3]]
 ; CHECK-NEXT:    br label [[BB8]]
 ; CHECK:       bb8:
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %mStackData = alloca [10 x i32], align 16
-  %i = bitcast [10 x i32]* %mStackData to i8*
-  %i1 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
-  %i2 = select i1 %b1, i32* %i1, i32* @g4
-  %i3 = tail call noalias i8* @_Znam(i64 48) #3
-  %i4 = tail call noalias i8* @_Znam(i64 48) #3
-  %.v = select i1 %b2, i8* %i3, i8* %i4
-  %i5 = bitcast i8* %.v to i32*
-  %i6 = icmp eq i32* %i5, %i2
+  %i2 = select i1 %b1, ptr %mStackData, ptr @g4
+  %i3 = tail call noalias ptr @_Znam(i64 48) #3
+  %i4 = tail call noalias ptr @_Znam(i64 48) #3
+  %.v = select i1 %b2, ptr %i3, ptr %i4
+  %i6 = icmp eq ptr %.v, %i2
   br i1 %i6, label %bb8, label %bb7
 
 bb7:                                              ; preds = %bb
-  call void @_ZdaPv(i8* %i3) #4
-  call void @_ZdaPv(i8* %i4) #4
+  call void @_ZdaPv(ptr %i3) #4
+  call void @_ZdaPv(ptr %i4) #4
   br label %bb8
 
 bb8:                                              ; preds = %bb7, %bb
@@ -155,71 +143,63 @@ bb8:                                              ; preds = %bb7, %bb
 define void @_Z2p6bb(i1 zeroext %b1, i1 zeroext %b2) {
 ; CHECK-LABEL: @_Z2p6bb(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[I3:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
-; CHECK-NEXT:    [[I4:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[I3:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[I4:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
 ; CHECK-NEXT:    br i1 false, label [[BB8:%.*]], label [[BB7:%.*]]
 ; CHECK:       bb7:
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I3]]) #[[ATTR3]]
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I4]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I3]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I4]]) #[[ATTR3]]
 ; CHECK-NEXT:    br label [[BB8]]
 ; CHECK:       bb8:
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %mStackData = alloca [10 x i32], align 16
-  %i = bitcast [10 x i32]* %mStackData to i8*
-  %i1 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
-  %i2 = select i1 %b1, i32* %i1, i32* @g5
-  %i3 = tail call noalias i8* @_Znam(i64 48) #3
-  %i4 = tail call noalias i8* @_Znam(i64 48) #3
-  %.v = select i1 %b2, i8* %i3, i8* %i4
-  %i5 = bitcast i8* %.v to i32*
-  %i6 = icmp eq i32* %i5, %i2
+  %i2 = select i1 %b1, ptr %mStackData, ptr @g5
+  %i3 = tail call noalias ptr @_Znam(i64 48) #3
+  %i4 = tail call noalias ptr @_Znam(i64 48) #3
+  %.v = select i1 %b2, ptr %i3, ptr %i4
+  %i6 = icmp eq ptr %.v, %i2
   br i1 %i6, label %bb8, label %bb7
 
 bb7:                                              ; preds = %bb
-  call void @_ZdaPv(i8* %i3) #4
-  call void @_ZdaPv(i8* %i4) #4
+  call void @_ZdaPv(ptr %i3) #4
+  call void @_ZdaPv(ptr %i4) #4
   br label %bb8
 
 bb8:                                              ; preds = %bb7, %bb
   ret void
 }
 
-define void @_Z4nopebbPi(i1 zeroext %b1, i1 zeroext %b2, i32* readnone %q) {
+define void @_Z4nopebbPi(i1 zeroext %b1, i1 zeroext %b2, ptr readnone %q) {
 ; CHECK-LABEL: @_Z4nopebbPi(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[MSTACKDATA:%.*]] = alloca [10 x i32], align 16
-; CHECK-NEXT:    [[I1:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[MSTACKDATA]], i64 0, i64 0
-; CHECK-NEXT:    [[I2:%.*]] = select i1 [[B1:%.*]], i32* [[I1]], i32* [[Q:%.*]]
-; CHECK-NEXT:    [[I3:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
-; CHECK-NEXT:    [[I4:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
-; CHECK-NEXT:    [[DOTV:%.*]] = select i1 [[B2:%.*]], i8* [[I3]], i8* [[I4]]
-; CHECK-NEXT:    [[I5:%.*]] = bitcast i8* [[DOTV]] to i32*
-; CHECK-NEXT:    [[I6:%.*]] = icmp eq i32* [[I5]], [[I2]]
+; CHECK-NEXT:    [[I2:%.*]] = select i1 [[B1:%.*]], ptr [[MSTACKDATA]], ptr [[Q:%.*]]
+; CHECK-NEXT:    [[I3:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[I4:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[DOTV:%.*]] = select i1 [[B2:%.*]], ptr [[I3]], ptr [[I4]]
+; CHECK-NEXT:    [[I6:%.*]] = icmp eq ptr [[DOTV]], [[I2]]
 ; CHECK-NEXT:    br i1 [[I6]], label [[BB8:%.*]], label [[BB7:%.*]]
 ; CHECK:       bb7:
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I3]]) #[[ATTR3]]
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I4]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I3]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I4]]) #[[ATTR3]]
 ; CHECK-NEXT:    br label [[BB8]]
 ; CHECK:       bb8:
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %mStackData = alloca [10 x i32], align 16
-  %i = bitcast [10 x i32]* %mStackData to i8*
-  %i1 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
-  %i2 = select i1 %b1, i32* %i1, i32* %q
-  %i3 = tail call noalias i8* @_Znam(i64 48) #3
-  %i4 = tail call noalias i8* @_Znam(i64 48) #3
-  %.v = select i1 %b2, i8* %i3, i8* %i4
-  %i5 = bitcast i8* %.v to i32*
-  %i6 = icmp eq i32* %i5, %i2
+  %i2 = select i1 %b1, ptr %mStackData, ptr %q
+  %i3 = tail call noalias ptr @_Znam(i64 48) #3
+  %i4 = tail call noalias ptr @_Znam(i64 48) #3
+  %.v = select i1 %b2, ptr %i3, ptr %i4
+  %i6 = icmp eq ptr %.v, %i2
   br i1 %i6, label %bb8, label %bb7
 
 bb7:                                              ; preds = %bb
-  call void @_ZdaPv(i8* %i3) #4
-  call void @_ZdaPv(i8* %i4) #4
+  call void @_ZdaPv(ptr %i3) #4
+  call void @_ZdaPv(ptr %i4) #4
   br label %bb8
 
 bb8:                                              ; preds = %bb7, %bb
@@ -230,36 +210,31 @@ define void @_Z2p3bb(i1 zeroext %b1, i1 zeroext %b2) {
 ; CHECK-LABEL: @_Z2p3bb(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[MSTACKDATA:%.*]] = alloca [10 x i32], align 16
-; CHECK-NEXT:    [[I1:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[MSTACKDATA]], i64 0, i64 0
-; CHECK-NEXT:    [[I2:%.*]] = select i1 [[B1:%.*]], i32* [[I1]], i32* @g1
-; CHECK-NEXT:    [[I3:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
-; CHECK-NEXT:    [[I4:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
-; CHECK-NEXT:    [[DOTV:%.*]] = select i1 [[B2:%.*]], i8* [[I3]], i8* [[I4]]
-; CHECK-NEXT:    [[I5:%.*]] = bitcast i8* [[DOTV]] to i32*
-; CHECK-NEXT:    [[I6:%.*]] = icmp eq i32* [[I5]], [[I2]]
+; CHECK-NEXT:    [[I2:%.*]] = select i1 [[B1:%.*]], ptr [[MSTACKDATA]], ptr @g1
+; CHECK-NEXT:    [[I3:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[I4:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[DOTV:%.*]] = select i1 [[B2:%.*]], ptr [[I3]], ptr [[I4]]
+; CHECK-NEXT:    [[I6:%.*]] = icmp eq ptr [[DOTV]], [[I2]]
 ; CHECK-NEXT:    br i1 [[I6]], label [[BB8:%.*]], label [[BB7:%.*]]
 ; CHECK:       bb7:
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I3]]) #[[ATTR3]]
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I4]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I3]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I4]]) #[[ATTR3]]
 ; CHECK-NEXT:    br label [[BB8]]
 ; CHECK:       bb8:
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %mStackData = alloca [10 x i32], align 16
-  %i = bitcast [10 x i32]* %mStackData to i8*
-  %i1 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
-  %i2 = select i1 %b1, i32* %i1, i32* @g1
-  %i3 = tail call noalias i8* @_Znam(i64 48) #3
-  %i4 = tail call noalias i8* @_Znam(i64 48) #3
-  %.v = select i1 %b2, i8* %i3, i8* %i4
-  %i5 = bitcast i8* %.v to i32*
-  %i6 = icmp eq i32* %i5, %i2
+  %i2 = select i1 %b1, ptr %mStackData, ptr @g1
+  %i3 = tail call noalias ptr @_Znam(i64 48) #3
+  %i4 = tail call noalias ptr @_Znam(i64 48) #3
+  %.v = select i1 %b2, ptr %i3, ptr %i4
+  %i6 = icmp eq ptr %.v, %i2
   br i1 %i6, label %bb8, label %bb7
 
 bb7:                                              ; preds = %bb
-  call void @_ZdaPv(i8* %i3) #4
-  call void @_ZdaPv(i8* %i4) #4
+  call void @_ZdaPv(ptr %i3) #4
+  call void @_ZdaPv(ptr %i4) #4
   br label %bb8
 
 bb8:                                              ; preds = %bb7, %bb
@@ -270,36 +245,31 @@ define void @_Z2p7bb(i1 zeroext %b1, i1 zeroext %b2) {
 ; CHECK-LABEL: @_Z2p7bb(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[MSTACKDATA:%.*]] = alloca [10 x i32], align 16
-; CHECK-NEXT:    [[I1:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[MSTACKDATA]], i64 0, i64 0
-; CHECK-NEXT:    [[I2:%.*]] = select i1 [[B1:%.*]], i32* [[I1]], i32* @g6
-; CHECK-NEXT:    [[I3:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
-; CHECK-NEXT:    [[I4:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
-; CHECK-NEXT:    [[DOTV:%.*]] = select i1 [[B2:%.*]], i8* [[I3]], i8* [[I4]]
-; CHECK-NEXT:    [[I5:%.*]] = bitcast i8* [[DOTV]] to i32*
-; CHECK-NEXT:    [[I6:%.*]] = icmp eq i32* [[I5]], [[I2]]
+; CHECK-NEXT:    [[I2:%.*]] = select i1 [[B1:%.*]], ptr [[MSTACKDATA]], ptr @g6
+; CHECK-NEXT:    [[I3:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[I4:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[DOTV:%.*]] = select i1 [[B2:%.*]], ptr [[I3]], ptr [[I4]]
+; CHECK-NEXT:    [[I6:%.*]] = icmp eq ptr [[DOTV]], [[I2]]
 ; CHECK-NEXT:    br i1 [[I6]], label [[BB8:%.*]], label [[BB7:%.*]]
 ; CHECK:       bb7:
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I3]]) #[[ATTR3]]
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I4]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I3]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I4]]) #[[ATTR3]]
 ; CHECK-NEXT:    br label [[BB8]]
 ; CHECK:       bb8:
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %mStackData = alloca [10 x i32], align 16
-  %i = bitcast [10 x i32]* %mStackData to i8*
-  %i1 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
-  %i2 = select i1 %b1, i32* %i1, i32* @g6
-  %i3 = tail call noalias i8* @_Znam(i64 48) #3
-  %i4 = tail call noalias i8* @_Znam(i64 48) #3
-  %.v = select i1 %b2, i8* %i3, i8* %i4
-  %i5 = bitcast i8* %.v to i32*
-  %i6 = icmp eq i32* %i5, %i2
+  %i2 = select i1 %b1, ptr %mStackData, ptr @g6
+  %i3 = tail call noalias ptr @_Znam(i64 48) #3
+  %i4 = tail call noalias ptr @_Znam(i64 48) #3
+  %.v = select i1 %b2, ptr %i3, ptr %i4
+  %i6 = icmp eq ptr %.v, %i2
   br i1 %i6, label %bb8, label %bb7
 
 bb7:                                              ; preds = %bb
-  call void @_ZdaPv(i8* %i3) #4
-  call void @_ZdaPv(i8* %i4) #4
+  call void @_ZdaPv(ptr %i3) #4
+  call void @_ZdaPv(ptr %i4) #4
   br label %bb8
 
 bb8:                                              ; preds = %bb7, %bb
@@ -310,28 +280,23 @@ define void @_Z2p2v(i32 %c) {
 ; CHECK-LABEL: @_Z2p2v(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[MSTACKDATA:%.*]] = alloca [10 x i32], i32 [[C:%.*]], align 16
-; CHECK-NEXT:    [[I1:%.*]] = tail call noalias i8* @_Znam(i64 48) #[[ATTR2]]
-; CHECK-NEXT:    [[I2:%.*]] = bitcast i8* [[I1]] to i32*
-; CHECK-NEXT:    [[I3:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[MSTACKDATA]], i64 0, i64 0
-; CHECK-NEXT:    [[I4:%.*]] = icmp eq i32* [[I2]], [[I3]]
+; CHECK-NEXT:    [[I1:%.*]] = tail call noalias ptr @_Znam(i64 48) #[[ATTR2]]
+; CHECK-NEXT:    [[I4:%.*]] = icmp eq ptr [[I1]], [[MSTACKDATA]]
 ; CHECK-NEXT:    br i1 [[I4]], label [[BB6:%.*]], label [[BB5:%.*]]
 ; CHECK:       bb5:
-; CHECK-NEXT:    call void @_ZdaPv(i8* [[I1]]) #[[ATTR3]]
+; CHECK-NEXT:    call void @_ZdaPv(ptr [[I1]]) #[[ATTR3]]
 ; CHECK-NEXT:    br label [[BB6]]
 ; CHECK:       bb6:
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %mStackData = alloca [10 x i32], i32 %c, align 16
-  %i = bitcast [10 x i32]* %mStackData to i8*
-  %i1 = tail call noalias i8* @_Znam(i64 48) #3
-  %i2 = bitcast i8* %i1 to i32*
-  %i3 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
-  %i4 = icmp eq i32* %i2, %i3
+  %i1 = tail call noalias ptr @_Znam(i64 48) #3
+  %i4 = icmp eq ptr %i1, %mStackData
   br i1 %i4, label %bb6, label %bb5
 
 bb5:                                              ; preds = %bb
-  call void @_ZdaPv(i8* %i1) #4
+  call void @_ZdaPv(ptr %i1) #4
   br label %bb6
 
 bb6:                                              ; preds = %bb5, %bb
@@ -339,10 +304,10 @@ bb6:                                              ; preds = %bb5, %bb
 }
 
 ; Function Attrs: nobuiltin
-declare noalias i8* @_Znam(i64) #1
+declare noalias ptr @_Znam(i64) #1
 
 ; Function Attrs: nobuiltin nounwind
-declare void @_ZdaPv(i8*) #2
+declare void @_ZdaPv(ptr) #2
 
 attributes #1 = { nobuiltin }
 attributes #2 = { nobuiltin nounwind }

diff  --git a/llvm/test/Transforms/InstSimplify/null-ptr-is-valid-attribute.ll b/llvm/test/Transforms/InstSimplify/null-ptr-is-valid-attribute.ll
index 0319348a94a80..61a1c8a69cc15 100644
--- a/llvm/test/Transforms/InstSimplify/null-ptr-is-valid-attribute.ll
+++ b/llvm/test/Transforms/InstSimplify/null-ptr-is-valid-attribute.ll
@@ -2,19 +2,19 @@
 ; RUN: opt -S -passes=instsimplify < %s | FileCheck %s
 
 ; A 0 valued byval pointer may be valid
-define i1 @byval_may_be_zero(i32* byval(i32) %ptr) null_pointer_is_valid {
+define i1 @byval_may_be_zero(ptr byval(i32) %ptr) null_pointer_is_valid {
 ; CHECK-LABEL: @byval_may_be_zero(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32* [[PTR:%.*]], null
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[PTR:%.*]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %cmp = icmp eq i32* %ptr, null
+  %cmp = icmp eq ptr %ptr, null
   ret i1 %cmp
 }
 
-define i1 @nonnull_may_be_zero(i32* nonnull %ptr) null_pointer_is_valid {
+define i1 @nonnull_may_be_zero(ptr nonnull %ptr) null_pointer_is_valid {
 ; CHECK-LABEL: @nonnull_may_be_zero(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = icmp eq i32* %ptr, null
+  %cmp = icmp eq ptr %ptr, null
   ret i1 %cmp
 }

diff  --git a/llvm/test/Transforms/InstSimplify/null-ptr-is-valid.ll b/llvm/test/Transforms/InstSimplify/null-ptr-is-valid.ll
index cf907b3f154b5..13e92cc0c0fd0 100644
--- a/llvm/test/Transforms/InstSimplify/null-ptr-is-valid.ll
+++ b/llvm/test/Transforms/InstSimplify/null-ptr-is-valid.ll
@@ -4,21 +4,21 @@
 target datalayout = "A5"
 
 ; A 0 valued byval pointer may be valid
-define i1 @byval_may_be_zero(i32 addrspace(5)* byval(i32) %ptr) {
+define i1 @byval_may_be_zero(ptr addrspace(5) byval(i32) %ptr) {
 ; CHECK-LABEL: @byval_may_be_zero(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 addrspace(5)* [[PTR:%.*]], null
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr addrspace(5) [[PTR:%.*]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %cmp = icmp eq i32 addrspace(5)* %ptr, null
+  %cmp = icmp eq ptr addrspace(5) %ptr, null
   ret i1 %cmp
 }
 
 ; FIXME: The interpretation of nonnull assumes a 0 pointer value, so
 ; this really is an incorrect fold.
-define i1 @nonnull_may_be_zero(i32 addrspace(5)* nonnull %ptr) {
+define i1 @nonnull_may_be_zero(ptr addrspace(5) nonnull %ptr) {
 ; CHECK-LABEL: @nonnull_may_be_zero(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %cmp = icmp eq i32 addrspace(5)* %ptr, null
+  %cmp = icmp eq ptr addrspace(5) %ptr, null
   ret i1 %cmp
 }

diff  --git a/llvm/test/Transforms/InstSimplify/opaque-ptr.ll b/llvm/test/Transforms/InstSimplify/opaque-ptr.ll
index d0d3ac1128e67..af314a24e1883 100644
--- a/llvm/test/Transforms/InstSimplify/opaque-ptr.ll
+++ b/llvm/test/Transforms/InstSimplify/opaque-ptr.ll
@@ -58,7 +58,7 @@ define ptr @constexpr_zero_gep_scalar_base_scalar_index() {
 ; CHECK-LABEL: @constexpr_zero_gep_scalar_base_scalar_index(
 ; CHECK-NEXT:    ret ptr @g
 ;
-  ret ptr getelementptr ([2 x i32], ptr @g, i64 0, i64 0)
+  ret ptr @g
 }
 
 define <2 x ptr> @constexpr_zero_gep_vector_base_scalar_index() {

diff  --git a/llvm/test/Transforms/InstSimplify/past-the-end.ll b/llvm/test/Transforms/InstSimplify/past-the-end.ll
index e213f068e0dd1..98285fb7bec15 100644
--- a/llvm/test/Transforms/InstSimplify/past-the-end.ll
+++ b/llvm/test/Transforms/InstSimplify/past-the-end.ll
@@ -13,7 +13,7 @@ define zeroext i1 @no_offsets() {
 ; CHECK-LABEL: @no_offsets(
 ; CHECK:         ret i1 false
 ;
-  %t = icmp eq i32* @opte_a, @opte_b
+  %t = icmp eq ptr @opte_a, @opte_b
   ret i1 %t
 }
 
@@ -21,11 +21,11 @@ define zeroext i1 @no_offsets() {
 
 define zeroext i1 @both_past_the_end() {
 ; CHECK-LABEL: @both_past_the_end(
-; CHECK:         ret i1 icmp eq (i32* getelementptr inbounds (i32, i32* @opte_a, i32 1), i32* getelementptr inbounds (i32, i32* @opte_b, i32 1))
+; CHECK:         ret i1 icmp eq (ptr getelementptr inbounds (i32, ptr @opte_a, i32 1), ptr getelementptr inbounds (i32, ptr @opte_b, i32 1))
 ;
-  %x = getelementptr i32, i32* @opte_a, i32 1
-  %y = getelementptr i32, i32* @opte_b, i32 1
-  %t = icmp eq i32* %x, %y
+  %x = getelementptr i32, ptr @opte_a, i32 1
+  %y = getelementptr i32, ptr @opte_b, i32 1
+  %t = icmp eq ptr %x, %y
   ret i1 %t
   ; TODO: refine this
 }
@@ -35,10 +35,10 @@ define zeroext i1 @both_past_the_end() {
 
 define zeroext i1 @just_one_past_the_end() {
 ; CHECK-LABEL: @just_one_past_the_end(
-; CHECK:         ret i1 icmp eq (i32* getelementptr inbounds (i32, i32* @opte_a, i32 1), i32* @opte_b)
+; CHECK:         ret i1 icmp eq (ptr getelementptr inbounds (i32, ptr @opte_a, i32 1), ptr @opte_b)
 ;
-  %x = getelementptr i32, i32* @opte_a, i32 1
-  %t = icmp eq i32* %x, @opte_b
+  %x = getelementptr i32, ptr @opte_a, i32 1
+  %t = icmp eq ptr %x, @opte_b
   ret i1 %t
 }
 
@@ -50,7 +50,7 @@ define zeroext i1 @no_alloca_offsets() {
 ;
   %m = alloca i32
   %n = alloca i32
-  %t = icmp eq i32* %m, %n
+  %t = icmp eq ptr %m, %n
   ret i1 %t
 }
 
@@ -60,16 +60,16 @@ define zeroext i1 @both_past_the_end_alloca() {
 ; CHECK-LABEL: @both_past_the_end_alloca(
 ; CHECK:         [[M:%.*]] = alloca i32
 ; CHECK-NEXT:    [[N:%.*]] = alloca i32
-; CHECK-NEXT:    [[X:%.*]] = getelementptr i32, i32* [[M]], i32 1
-; CHECK-NEXT:    [[Y:%.*]] = getelementptr i32, i32* [[N]], i32 1
-; CHECK-NEXT:    [[T:%.*]] = icmp eq i32* [[X]], [[Y]]
+; CHECK-NEXT:    [[X:%.*]] = getelementptr i32, ptr [[M]], i32 1
+; CHECK-NEXT:    [[Y:%.*]] = getelementptr i32, ptr [[N]], i32 1
+; CHECK-NEXT:    [[T:%.*]] = icmp eq ptr [[X]], [[Y]]
 ; CHECK-NEXT:    ret i1 [[T]]
 ;
   %m = alloca i32
   %n = alloca i32
-  %x = getelementptr i32, i32* %m, i32 1
-  %y = getelementptr i32, i32* %n, i32 1
-  %t = icmp eq i32* %x, %y
+  %x = getelementptr i32, ptr %m, i32 1
+  %y = getelementptr i32, ptr %n, i32 1
+  %t = icmp eq ptr %x, %y
   ret i1 %t
   ; TODO: refine this
 }
@@ -81,13 +81,13 @@ define zeroext i1 @just_one_past_the_end_alloca() {
 ; CHECK-LABEL: @just_one_past_the_end_alloca(
 ; CHECK:         [[M:%.*]] = alloca i32
 ; CHECK-NEXT:    [[N:%.*]] = alloca i32
-; CHECK-NEXT:    [[X:%.*]] = getelementptr i32, i32* [[M]], i32 1
-; CHECK-NEXT:    [[T:%.*]] = icmp eq i32* [[X]], [[N]]
+; CHECK-NEXT:    [[X:%.*]] = getelementptr i32, ptr [[M]], i32 1
+; CHECK-NEXT:    [[T:%.*]] = icmp eq ptr [[X]], [[N]]
 ; CHECK-NEXT:    ret i1 [[T]]
 ;
   %m = alloca i32
   %n = alloca i32
-  %x = getelementptr i32, i32* %m, i32 1
-  %t = icmp eq i32* %x, %n
+  %x = getelementptr i32, ptr %m, i32 1
+  %t = icmp eq ptr %x, %n
   ret i1 %t
 }

diff  --git a/llvm/test/Transforms/InstSimplify/phi-cse.ll b/llvm/test/Transforms/InstSimplify/phi-cse.ll
index bb0a01f5d84d0..327cf24be1740 100644
--- a/llvm/test/Transforms/InstSimplify/phi-cse.ll
+++ b/llvm/test/Transforms/InstSimplify/phi-cse.ll
@@ -6,7 +6,7 @@
 ; from one another.
 
 ; Most basic case, fully identical PHI nodes
-define void @test0(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @test0(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @test0(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -17,8 +17,8 @@ define void @test0(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -33,13 +33,13 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
   ret void
 }
 
 ; Fully identical PHI nodes, but order of operands 
diff ers
-define void @test1(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @test1(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -50,8 +50,8 @@ define void @test1(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V1]], [[B1]] ], [ [[V0]], [[B0]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -66,13 +66,13 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v1, %b1 ], [ %v0, %b0 ]
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
   ret void
 }
 
 ; Different incoming values in second PHI
-define void @negative_test2(i32 %v0, i32 %v1, i32 %v2, i1 %c, i32* %d0, i32* %d1) {
+define void @negative_test2(i32 %v0, i32 %v1, i32 %v2, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @negative_test2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -83,8 +83,8 @@ define void @negative_test2(i32 %v0, i32 %v1, i32 %v2, i1 %c, i32* %d0, i32* %d1
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V2:%.*]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -99,11 +99,11 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v0, %b0 ], [ %v2, %b1 ] ; from %b0 takes %v2 instead of %v1
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
   ret void
 }
-define void @negative_test3(i32 %v0, i32 %v1, i32 %v2, i1 %c, i32* %d0, i32* %d1) {
+define void @negative_test3(i32 %v0, i32 %v1, i32 %v2, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @negative_test3(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -114,8 +114,8 @@ define void @negative_test3(i32 %v0, i32 %v1, i32 %v2, i1 %c, i32* %d0, i32* %d1
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V2:%.*]], [[B1]] ], [ [[V0]], [[B0]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -130,11 +130,11 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v2, %b1 ], [ %v0, %b0 ] ; from %b0 takes %v2 instead of %v1
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
   ret void
 }
-define void @negative_test4(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @negative_test4(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @negative_test4(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -145,8 +145,8 @@ define void @negative_test4(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V1]], [[B1]] ], [ [[V0]], [[B0]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -161,13 +161,13 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v1, %b1 ], [ %v0, %b0 ] ; incoming values are swapped
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
   ret void
 }
 
 ; Both PHI's are identical, but the first one has no uses, so ignore it.
-define void @test5(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @test5(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -177,7 +177,7 @@ define void @test5(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -192,11 +192,11 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ] ; unused
   %i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
-  store i32 %i1, i32* %d1
+  store i32 %i1, ptr %d1
   ret void
 }
 ; Second PHI has no uses
-define void @test6(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @test6(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -206,7 +206,7 @@ define void @test6(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -221,12 +221,12 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ] ; unused
-  store i32 %i0, i32* %d0
+  store i32 %i0, ptr %d0
   ret void
 }
 
 ; Non-matching PHI node should be ignored without terminating CSE.
-define void @test7(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1, i16* %d2) {
+define void @test7(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, ptr %d0, ptr %d1, ptr %d2) {
 ; CHECK-LABEL: @test7(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -238,9 +238,9 @@ define void @test7(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1
 ; CHECK-NEXT:    [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
-; CHECK-NEXT:    store i16 [[IBAD]], i16* [[D2:%.*]], align 2
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
+; CHECK-NEXT:    store i16 [[IBAD]], ptr [[D2:%.*]], align 2
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -256,12 +256,12 @@ end:
   %iBAD = phi i16 [ %v2, %b0 ], [ %v3, %b1 ]
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
-  store i16 %iBAD, i16* %d2
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
+  store i16 %iBAD, ptr %d2
   ret void
 }
-define void @test8(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1, i16* %d2) {
+define void @test8(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, ptr %d0, ptr %d1, ptr %d2) {
 ; CHECK-LABEL: @test8(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -273,9 +273,9 @@ define void @test8(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
-; CHECK-NEXT:    store i16 [[IBAD]], i16* [[D2:%.*]], align 2
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
+; CHECK-NEXT:    store i16 [[IBAD]], ptr [[D2:%.*]], align 2
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -291,12 +291,12 @@ end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %iBAD = phi i16 [ %v2, %b0 ], [ %v3, %b1 ]
   %i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
-  store i16 %iBAD, i16* %d2
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
+  store i16 %iBAD, ptr %d2
   ret void
 }
-define void @test9(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1, i16* %d2) {
+define void @test9(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, ptr %d0, ptr %d1, ptr %d2) {
 ; CHECK-LABEL: @test9(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -308,9 +308,9 @@ define void @test9(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ]
 ; CHECK-NEXT:    [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
-; CHECK-NEXT:    store i16 [[IBAD]], i16* [[D2:%.*]], align 2
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
+; CHECK-NEXT:    store i16 [[IBAD]], ptr [[D2:%.*]], align 2
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -326,8 +326,8 @@ end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %iBAD = phi i16 [ %v2, %b0 ], [ %v3, %b1 ]
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
-  store i16 %iBAD, i16* %d2
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
+  store i16 %iBAD, ptr %d2
   ret void
 }

diff  --git a/llvm/test/Transforms/InstSimplify/phi.ll b/llvm/test/Transforms/InstSimplify/phi.ll
index d91b555db612b..b83ebcbadeab6 100644
--- a/llvm/test/Transforms/InstSimplify/phi.ll
+++ b/llvm/test/Transforms/InstSimplify/phi.ll
@@ -162,7 +162,7 @@ define i64 @pr49839_with_poison(i1 %c) {
 ; CHECK:       if:
 ; CHECK-NEXT:    br label [[JOIN]]
 ; CHECK:       join:
-; CHECK-NEXT:    [[PHI:%.*]] = phi i64 [ poison, [[IF]] ], [ srem (i64 1, i64 ptrtoint (i32* @g to i64)), [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[PHI:%.*]] = phi i64 [ poison, [[IF]] ], [ srem (i64 1, i64 ptrtoint (ptr @g to i64)), [[ENTRY:%.*]] ]
 ; CHECK-NEXT:    ret i64 [[PHI]]
 ;
 entry:
@@ -172,7 +172,7 @@ if:
   br label %join
 
 join:
-  %phi = phi i64 [ poison, %if ], [ srem (i64 1, i64 ptrtoint (i32* @g to i64)) , %entry ]
+  %phi = phi i64 [ poison, %if ], [ srem (i64 1, i64 ptrtoint (ptr @g to i64)) , %entry ]
   ret i64 %phi
 }
 
@@ -183,7 +183,7 @@ define i64 @pr49839_without_poison(i1 %c) {
 ; CHECK:       if:
 ; CHECK-NEXT:    br label [[JOIN]]
 ; CHECK:       join:
-; CHECK-NEXT:    ret i64 srem (i64 1, i64 ptrtoint (i32* @g to i64))
+; CHECK-NEXT:    ret i64 srem (i64 1, i64 ptrtoint (ptr @g to i64))
 ;
 entry:
   br i1 %c, label %if, label %join
@@ -192,6 +192,6 @@ if:
   br label %join
 
 join:
-  %phi = phi i64 [ srem (i64 1, i64 ptrtoint (i32* @g to i64)), %if ], [ srem (i64 1, i64 ptrtoint (i32* @g to i64)) , %entry ]
+  %phi = phi i64 [ srem (i64 1, i64 ptrtoint (ptr @g to i64)), %if ], [ srem (i64 1, i64 ptrtoint (ptr @g to i64)) , %entry ]
   ret i64 %phi
 }

diff  --git a/llvm/test/Transforms/InstSimplify/pr33957.ll b/llvm/test/Transforms/InstSimplify/pr33957.ll
index 256bb89e78617..e7663fc2d589a 100644
--- a/llvm/test/Transforms/InstSimplify/pr33957.ll
+++ b/llvm/test/Transforms/InstSimplify/pr33957.ll
@@ -20,8 +20,8 @@ bb:
   br i1 true, label %bb1, label %bb3
 
 bb1:
-  %tmp = getelementptr inbounds [78 x %struct.bar], [78 x %struct.bar]* @global, i32 0, <4 x i32> undef
-  %tmp2 = getelementptr inbounds %struct.bar, <4 x %struct.bar*> %tmp, i32 1
+  %tmp = getelementptr inbounds [78 x %struct.bar], ptr @global, i32 0, <4 x i32> undef
+  %tmp2 = getelementptr inbounds %struct.bar, <4 x ptr> %tmp, i32 1
   br i1 true, label %bb3, label %bb1
 
 bb3:

diff  --git a/llvm/test/Transforms/InstSimplify/pr49495.ll b/llvm/test/Transforms/InstSimplify/pr49495.ll
index bd3e049802463..8ac457a7cda30 100644
--- a/llvm/test/Transforms/InstSimplify/pr49495.ll
+++ b/llvm/test/Transforms/InstSimplify/pr49495.ll
@@ -2,17 +2,17 @@
 ; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
 
 ; The first comparison (a != b) should not be dropped
-define i1 @test1(i8* %a, i8* %b) {
+define i1 @test1(ptr %a, ptr %b) {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    [[COND1:%.*]] = icmp ne i8* [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT:    [[A2:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 -1
-; CHECK-NEXT:    [[COND2:%.*]] = icmp ugt i8* [[A2]], [[B]]
+; CHECK-NEXT:    [[COND1:%.*]] = icmp ne ptr [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[A2:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 -1
+; CHECK-NEXT:    [[COND2:%.*]] = icmp ugt ptr [[A2]], [[B]]
 ; CHECK-NEXT:    [[RES:%.*]] = select i1 [[COND1]], i1 [[COND2]], i1 false
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
-  %cond1 = icmp ne i8* %a, %b
-  %a2 = getelementptr inbounds i8, i8* %a, i64 -1
-  %cond2 = icmp ugt i8* %a2, %b
+  %cond1 = icmp ne ptr %a, %b
+  %a2 = getelementptr inbounds i8, ptr %a, i64 -1
+  %cond2 = icmp ugt ptr %a2, %b
   %res = select i1 %cond1, i1 %cond2, i1 false
   ret i1 %res
 }

diff  --git a/llvm/test/Transforms/InstSimplify/ptr_
diff .ll b/llvm/test/Transforms/InstSimplify/ptr_
diff .ll
index c2658d0b9776c..d18b462d9bab1 100644
--- a/llvm/test/Transforms/InstSimplify/ptr_
diff .ll
+++ b/llvm/test/Transforms/InstSimplify/ptr_
diff .ll
@@ -3,61 +3,58 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-define i64 @ptr
diff 1(i8* %ptr) {
+define i64 @ptr
diff 1(ptr %ptr) {
 ; CHECK-LABEL: @ptr
diff 1(
 ; CHECK:         ret i64 42
 ;
-  %first = getelementptr inbounds i8, i8* %ptr, i32 0
-  %last = getelementptr inbounds i8, i8* %ptr, i32 42
-  %first.int = ptrtoint i8* %first to i64
-  %last.int = ptrtoint i8* %last to i64
+  %last = getelementptr inbounds i8, ptr %ptr, i32 42
+  %first.int = ptrtoint ptr %ptr to i64
+  %last.int = ptrtoint ptr %last to i64
   %
diff  = sub i64 %last.int, %first.int
   ret i64 %
diff 
 }
 
-define i64 @ptr
diff 2(i8* %ptr) {
+define i64 @ptr
diff 2(ptr %ptr) {
 ; CHECK-LABEL: @ptr
diff 2(
 ; CHECK:         ret i64 42
 ;
-  %first1 = getelementptr inbounds i8, i8* %ptr, i32 0
-  %first2 = getelementptr inbounds i8, i8* %first1, i32 1
-  %first3 = getelementptr inbounds i8, i8* %first2, i32 2
-  %first4 = getelementptr inbounds i8, i8* %first3, i32 4
-  %last1 = getelementptr inbounds i8, i8* %first2, i32 48
-  %last2 = getelementptr inbounds i8, i8* %last1, i32 8
-  %last3 = getelementptr inbounds i8, i8* %last2, i32 -4
-  %last4 = getelementptr inbounds i8, i8* %last3, i32 -4
-  %first.int = ptrtoint i8* %first4 to i64
-  %last.int = ptrtoint i8* %last4 to i64
+  %first2 = getelementptr inbounds i8, ptr %ptr, i32 1
+  %first3 = getelementptr inbounds i8, ptr %first2, i32 2
+  %first4 = getelementptr inbounds i8, ptr %first3, i32 4
+  %last1 = getelementptr inbounds i8, ptr %first2, i32 48
+  %last2 = getelementptr inbounds i8, ptr %last1, i32 8
+  %last3 = getelementptr inbounds i8, ptr %last2, i32 -4
+  %last4 = getelementptr inbounds i8, ptr %last3, i32 -4
+  %first.int = ptrtoint ptr %first4 to i64
+  %last.int = ptrtoint ptr %last4 to i64
   %
diff  = sub i64 %last.int, %first.int
   ret i64 %
diff 
 }
 
-define i64 @ptr
diff 3(i8* %ptr) {
+define i64 @ptr
diff 3(ptr %ptr) {
 ; Don't bother with non-inbounds GEPs.
 ; CHECK-LABEL: @ptr
diff 3(
-; CHECK:         [[LAST:%.*]] = getelementptr i8, i8* %ptr, i32 42
-; CHECK-NEXT:    [[FIRST_INT:%.*]] = ptrtoint i8* %ptr to i64
-; CHECK-NEXT:    [[LAST_INT:%.*]] = ptrtoint i8* [[LAST]] to i64
+; CHECK:         [[LAST:%.*]] = getelementptr i8, ptr %ptr, i32 42
+; CHECK-NEXT:    [[FIRST_INT:%.*]] = ptrtoint ptr %ptr to i64
+; CHECK-NEXT:    [[LAST_INT:%.*]] = ptrtoint ptr [[LAST]] to i64
 ; CHECK-NEXT:    [[DIFF:%.*]] = sub i64 [[LAST_INT]], [[FIRST_INT]]
 ; CHECK-NEXT:    ret i64 [[DIFF]]
 ;
-  %first = getelementptr i8, i8* %ptr, i32 0
-  %last = getelementptr i8, i8* %ptr, i32 42
-  %first.int = ptrtoint i8* %first to i64
-  %last.int = ptrtoint i8* %last to i64
+  %last = getelementptr i8, ptr %ptr, i32 42
+  %first.int = ptrtoint ptr %ptr to i64
+  %last.int = ptrtoint ptr %last to i64
   %
diff  = sub i64 %last.int, %first.int
   ret i64 %
diff 
 }
 
-define <4 x i32> @ptr
diff 4(<4 x i8*> %arg) nounwind {
+define <4 x i32> @ptr
diff 4(<4 x ptr> %arg) nounwind {
 ; Handle simple cases of vectors of pointers.
 ; CHECK-LABEL: @ptr
diff 4(
 ; CHECK:         ret <4 x i32> zeroinitializer
 ;
-  %p1 = ptrtoint <4 x i8*> %arg to <4 x i32>
-  %bc = bitcast <4 x i8*> %arg to <4 x i32*>
-  %p2 = ptrtoint <4 x i32*> %bc to <4 x i32>
+  %p1 = ptrtoint <4 x ptr> %arg to <4 x i32>
+  %bc = bitcast <4 x ptr> %arg to <4 x ptr>
+  %p2 = ptrtoint <4 x ptr> %bc to <4 x i32>
   %sub = sub <4 x i32> %p1, %p2
   ret <4 x i32> %sub
 }
@@ -72,13 +69,10 @@ define i32 @ptr
diff 5() nounwind {
 ; CHECK-NEXT:    ret i32 0
 ;
 bb:
-  %tmp = getelementptr inbounds %struct.ham, %struct.ham* @global, i32 0, i32 1
-  %tmp1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %tmp, i32 0, i32 0
-  %tmp2 = bitcast [2 x i32]* %tmp1 to i32*
-  %tmp3 = ptrtoint i32* %tmp2 to i32
-  %tmp4 = getelementptr inbounds %struct.ham, %struct.ham* @global, i32 0, i32 1
-  %tmp5 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %tmp4, i32 0, i32 0
-  %tmp6 = ptrtoint [2 x i32]* %tmp5 to i32
+  %tmp = getelementptr inbounds %struct.ham, ptr @global, i32 0, i32 1
+  %tmp3 = ptrtoint ptr %tmp to i32
+  %tmp4 = getelementptr inbounds %struct.ham, ptr @global, i32 0, i32 1
+  %tmp6 = ptrtoint ptr %tmp4 to i32
   %tmp7 = sub i32 %tmp3, %tmp6
   ret i32 %tmp7
 }

diff  --git a/llvm/test/Transforms/InstSimplify/redundant-null-check-in-uadd_with_overflow-of-nonnull-ptr.ll b/llvm/test/Transforms/InstSimplify/redundant-null-check-in-uadd_with_overflow-of-nonnull-ptr.ll
index adb8097e21ff1..6976e31c2c9fb 100644
--- a/llvm/test/Transforms/InstSimplify/redundant-null-check-in-uadd_with_overflow-of-nonnull-ptr.ll
+++ b/llvm/test/Transforms/InstSimplify/redundant-null-check-in-uadd_with_overflow-of-nonnull-ptr.ll
@@ -7,56 +7,56 @@
 ; that will already catch the get null pointer,
 ; so the separate null check is redundant and can be dropped.
 
-define i1 @t0(i8* nonnull %base, i64 %offset) {
+define i1 @t0(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t0(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE_INT]]
 ; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp ne i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp uge i64 %adjusted, %base_int
   %res = and i1 %non_null_after_adjustment, %no_overflow_during_adjustment
   ret i1 %res
 }
-define i1 @t1(i8* nonnull %base, i64 %offset) {
+define i1 @t1(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t1(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ule i64 [[BASE_INT]], [[ADJUSTED]]
 ; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp ne i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp ule i64 %base_int, %adjusted ; swapped
   %res = and i1 %non_null_after_adjustment, %no_overflow_during_adjustment
   ret i1 %res
 }
-define i1 @t2(i8* nonnull %base, i64 %offset) {
+define i1 @t2(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t2(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE_INT]]
 ; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp ne i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp uge i64 %adjusted, %base_int
   %res = and i1 %no_overflow_during_adjustment, %non_null_after_adjustment ; swapped
   ret i1 %res
 }
-define i1 @t3(i8* nonnull %base, i64 %offset) {
+define i1 @t3(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t3(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ule i64 [[BASE_INT]], [[ADJUSTED]]
 ; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp ne i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp ule i64 %base_int, %adjusted ; swapped
@@ -67,56 +67,56 @@ define i1 @t3(i8* nonnull %base, i64 %offset) {
 ; If the joining operator was 'or', i.e. we check that either we produced non-null
 ; pointer, or no overflow happened, then the overflow check itself is redundant.
 
-define i1 @t4(i8* nonnull %base, i64 %offset) {
+define i1 @t4(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t4(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp ne i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp ne i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp uge i64 %adjusted, %base_int
   %res = or i1 %non_null_after_adjustment, %no_overflow_during_adjustment
   ret i1 %res
 }
-define i1 @t5(i8* nonnull %base, i64 %offset) {
+define i1 @t5(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t5(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp ne i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp ne i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp ule i64 %base_int, %adjusted ; swapped
   %res = or i1 %non_null_after_adjustment, %no_overflow_during_adjustment
   ret i1 %res
 }
-define i1 @t6(i8* nonnull %base, i64 %offset) {
+define i1 @t6(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t6(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp ne i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp ne i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp uge i64 %adjusted, %base_int
   %res = or i1 %no_overflow_during_adjustment, %non_null_after_adjustment ; swapped
   ret i1 %res
 }
-define i1 @t7(i8* nonnull %base, i64 %offset) {
+define i1 @t7(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t7(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp ne i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp ne i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp ule i64 %base_int, %adjusted ; swapped
@@ -128,56 +128,56 @@ define i1 @t7(i8* nonnull %base, i64 %offset) {
 ; or overflow happens, then again, the standalone null check is redundant and
 ; can be dropped.
 
-define i1 @t8(i8* nonnull %base, i64 %offset) {
+define i1 @t8(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t8(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE_INT]]
 ; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp eq i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp ult i64 %adjusted, %base_int
   %res = or i1 %non_null_after_adjustment, %no_overflow_during_adjustment
   ret i1 %res
 }
-define i1 @t9(i8* nonnull %base, i64 %offset) {
+define i1 @t9(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t9(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ugt i64 [[BASE_INT]], [[ADJUSTED]]
 ; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp eq i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp ugt i64 %base_int, %adjusted ; swapped
   %res = or i1 %non_null_after_adjustment, %no_overflow_during_adjustment
   ret i1 %res
 }
-define i1 @t10(i8* nonnull %base, i64 %offset) {
+define i1 @t10(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t10(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE_INT]]
 ; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp eq i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp ult i64 %adjusted, %base_int
   %res = or i1 %no_overflow_during_adjustment, %non_null_after_adjustment ; swapped
   ret i1 %res
 }
-define i1 @t11(i8* nonnull %base, i64 %offset) {
+define i1 @t11(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t11(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NO_OVERFLOW_DURING_ADJUSTMENT:%.*]] = icmp ugt i64 [[BASE_INT]], [[ADJUSTED]]
 ; CHECK-NEXT:    ret i1 [[NO_OVERFLOW_DURING_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp eq i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp ugt i64 %base_int, %adjusted ; swapped
@@ -188,56 +188,56 @@ define i1 @t11(i8* nonnull %base, i64 %offset) {
 ; If the joining operator was 'and', i.e. we check that we both get null pointer
 ; AND overflow happens, then the overflow check is redundant.
 
-define i1 @t12(i8* nonnull %base, i64 %offset) {
+define i1 @t12(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t12(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp eq i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp eq i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp ult i64 %adjusted, %base_int
   %res = and i1 %non_null_after_adjustment, %no_overflow_during_adjustment
   ret i1 %res
 }
-define i1 @t13(i8* nonnull %base, i64 %offset) {
+define i1 @t13(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t13(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp eq i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp eq i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp ugt i64 %base_int, %adjusted ; swapped
   %res = and i1 %non_null_after_adjustment, %no_overflow_during_adjustment
   ret i1 %res
 }
-define i1 @t14(i8* nonnull %base, i64 %offset) {
+define i1 @t14(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t14(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp eq i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp eq i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp ult i64 %adjusted, %base_int
   %res = and i1 %no_overflow_during_adjustment, %non_null_after_adjustment ; swapped
   ret i1 %res
 }
-define i1 @t15(i8* nonnull %base, i64 %offset) {
+define i1 @t15(ptr nonnull %base, i64 %offset) {
 ; CHECK-LABEL: @t15(
-; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint i8* [[BASE:%.*]] to i64
+; CHECK-NEXT:    [[BASE_INT:%.*]] = ptrtoint ptr [[BASE:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i64 [[BASE_INT]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[NON_NULL_AFTER_ADJUSTMENT:%.*]] = icmp eq i64 [[ADJUSTED]], 0
 ; CHECK-NEXT:    ret i1 [[NON_NULL_AFTER_ADJUSTMENT]]
 ;
-  %base_int = ptrtoint i8* %base to i64
+  %base_int = ptrtoint ptr %base to i64
   %adjusted = add i64 %base_int, %offset
   %non_null_after_adjustment = icmp eq i64 %adjusted, 0
   %no_overflow_during_adjustment = icmp ugt i64 %base_int, %adjusted ; swapped

diff  --git a/llvm/test/Transforms/InstSimplify/remove-dead-call.ll b/llvm/test/Transforms/InstSimplify/remove-dead-call.ll
index 724d3573002d1..4f430958158d3 100755
--- a/llvm/test/Transforms/InstSimplify/remove-dead-call.ll
+++ b/llvm/test/Transforms/InstSimplify/remove-dead-call.ll
@@ -7,20 +7,20 @@
 ;
 ; DETAILS: Made Modification 'Remove redundant instructions' on Function 'main'
 
-define internal void @func_1(i64* nocapture readnone %0) #0 {
+define internal void @func_1(ptr nocapture readnone %0) #0 {
 ; CHECK-LABEL: @func_1(
 ; CHECK-NEXT:    ret void
 ;
   ret void
 }
 
-define i16 @main(i16 %0, i16** nocapture readnone %1) #1 {
+define i16 @main(i16 %0, ptr nocapture readnone %1) #1 {
 ; CHECK-LABEL: @main(
 ; CHECK-NEXT:  bb1:
 ; CHECK-NEXT:    unreachable
 ;
 bb1:
-  call void @func_1(i64* undef)
+  call void @func_1(ptr undef)
   unreachable
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/require-dominator.ll b/llvm/test/Transforms/InstSimplify/require-dominator.ll
index f1da236c4f0c6..320aba95a51e4 100644
--- a/llvm/test/Transforms/InstSimplify/require-dominator.ll
+++ b/llvm/test/Transforms/InstSimplify/require-dominator.ll
@@ -7,18 +7,18 @@
 target triple = "x86_64-grtev4-linux-gnu"
 
 ; Function Attrs: nounwind uwtable
-define void @foo(i16 *) #1 align 2 {
+define void @foo(ptr) #1 align 2 {
   br i1 undef, label %exit, label %2
 
 ; <label>:2:
-  %3 = tail call i8* @_Znwm(i64 56) #10
-  %4 = bitcast i8* %3 to i16*
-  %p = load i16*, i16** undef, align 8
-  %5 = icmp eq i16* %p, %4
+  %3 = tail call ptr @_Znwm(i64 56) #10
+  %4 = bitcast ptr %3 to ptr
+  %p = load ptr, ptr undef, align 8
+  %5 = icmp eq ptr %p, %4
   br i1 %5, label %exit, label %6
 
 ; <label>:6:
-  %7 = icmp eq i16* %p, null
+  %7 = icmp eq ptr %p, null
   br i1 %7, label %exit, label %8
 
 ; <label>:8:
@@ -29,4 +29,4 @@ exit:
 }
 
 ; Function Attrs: nobuiltin
-declare i8* @_Znwm(i64)
+declare ptr @_Znwm(i64)

diff  --git a/llvm/test/Transforms/InstSimplify/result-of-usub-by-nonzero-is-non-zero-and-no-overflow.ll b/llvm/test/Transforms/InstSimplify/result-of-usub-by-nonzero-is-non-zero-and-no-overflow.ll
index 2743676c005e1..0b241ffa5e36e 100644
--- a/llvm/test/Transforms/InstSimplify/result-of-usub-by-nonzero-is-non-zero-and-no-overflow.ll
+++ b/llvm/test/Transforms/InstSimplify/result-of-usub-by-nonzero-is-non-zero-and-no-overflow.ll
@@ -5,14 +5,14 @@
 ; that the result is non-zero. This can be simplified just to a comparison
 ; between the base and offset.
 
-define i1 @t0(i64 %base, i64* nonnull %offsetptr) {
+define i1 @t0(i64 %base, ptr nonnull %offsetptr) {
 ; CHECK-LABEL: @t0(
-; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
+; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
 ; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE]]
 ; CHECK-NEXT:    ret i1 [[NO_UNDERFLOW]]
 ;
-  %offset = ptrtoint i64* %offsetptr to i64
+  %offset = ptrtoint ptr %offsetptr to i64
 
   %adjusted = sub i64 %base, %offset
   %no_underflow = icmp uge i64 %adjusted, %base
@@ -21,14 +21,14 @@ define i1 @t0(i64 %base, i64* nonnull %offsetptr) {
   ret i1 %r
 }
 
-define i1 @t1(i64 %base, i64* nonnull %offsetptr) {
+define i1 @t1(i64 %base, ptr nonnull %offsetptr) {
 ; CHECK-LABEL: @t1(
-; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
+; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
 ; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE]]
 ; CHECK-NEXT:    ret i1 [[NO_UNDERFLOW]]
 ;
-  %offset = ptrtoint i64* %offsetptr to i64
+  %offset = ptrtoint ptr %offsetptr to i64
 
   %adjusted = sub i64 %base, %offset
   %no_underflow = icmp ult i64 %adjusted, %base
@@ -37,14 +37,14 @@ define i1 @t1(i64 %base, i64* nonnull %offsetptr) {
   ret i1 %r
 }
 
-define i1 @t2_commutative(i64 %base, i64* nonnull %offsetptr) {
+define i1 @t2_commutative(i64 %base, ptr nonnull %offsetptr) {
 ; CHECK-LABEL: @t2_commutative(
-; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
+; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
 ; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[BASE]], [[ADJUSTED]]
 ; CHECK-NEXT:    ret i1 [[NO_UNDERFLOW]]
 ;
-  %offset = ptrtoint i64* %offsetptr to i64
+  %offset = ptrtoint ptr %offsetptr to i64
 
   %adjusted = sub i64 %base, %offset
   %no_underflow = icmp ule i64 %base, %adjusted
@@ -53,14 +53,14 @@ define i1 @t2_commutative(i64 %base, i64* nonnull %offsetptr) {
   ret i1 %r
 }
 
-define i1 @t3_commutative(i64 %base, i64* nonnull %offsetptr) {
+define i1 @t3_commutative(i64 %base, ptr nonnull %offsetptr) {
 ; CHECK-LABEL: @t3_commutative(
-; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
+; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
 ; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[BASE]], [[ADJUSTED]]
 ; CHECK-NEXT:    ret i1 [[NO_UNDERFLOW]]
 ;
-  %offset = ptrtoint i64* %offsetptr to i64
+  %offset = ptrtoint ptr %offsetptr to i64
 
   %adjusted = sub i64 %base, %offset
   %no_underflow = icmp ugt i64 %base, %adjusted

diff  --git a/llvm/test/Transforms/InstSimplify/returned.ll b/llvm/test/Transforms/InstSimplify/returned.ll
index 320e1f0575762..94a98ac6cb05d 100644
--- a/llvm/test/Transforms/InstSimplify/returned.ll
+++ b/llvm/test/Transforms/InstSimplify/returned.ll
@@ -7,10 +7,8 @@ define i1 @bitcast() {
 ;
   %a = alloca i32
   %b = alloca i64
-  %x = bitcast i32* %a to i8*
-  %z = bitcast i64* %b to i8*
-  %y = call i8* @func1(i8* %z)
-  %cmp = icmp eq i8* %x, %y
+  %y = call ptr @func1(ptr %b)
+  %cmp = icmp eq ptr %a, %y
   ret i1 %cmp
 }
 
@@ -21,13 +19,12 @@ define i1 @gep3() {
 ; CHECK-NEXT:    ret i1 false
 ;
   %x = alloca %gept, align 8
-  %a = getelementptr %gept, %gept* %x, i64 0, i32 0
-  %y = call %gept* @func2(%gept* %x)
-  %b = getelementptr %gept, %gept* %y, i64 0, i32 1
-  %equal = icmp eq i32* %a, %b
+  %y = call ptr @func2(ptr %x)
+  %b = getelementptr %gept, ptr %y, i64 0, i32 1
+  %equal = icmp eq ptr %x, %b
   ret i1 %equal
 }
 
-declare i8* @func1(i8* returned) nounwind readnone willreturn
-declare %gept* @func2(%gept* returned) nounwind readnone willreturn
+declare ptr @func1(ptr returned) nounwind readnone willreturn
+declare ptr @func2(ptr returned) nounwind readnone willreturn
 

diff  --git a/llvm/test/Transforms/InstSimplify/select-implied.ll b/llvm/test/Transforms/InstSimplify/select-implied.ll
index 89e4d75eed27c..a420ad17636fd 100644
--- a/llvm/test/Transforms/InstSimplify/select-implied.ll
+++ b/llvm/test/Transforms/InstSimplify/select-implied.ll
@@ -76,20 +76,20 @@ end:
   ret void
 }
 
-define i8 @PR23333(i8 addrspace(1)* %ptr) {
+define i8 @PR23333(ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: @PR23333(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 addrspace(1)* [[PTR:%.*]], null
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr addrspace(1) [[PTR:%.*]], null
 ; CHECK-NEXT:    br i1 [[CMP]], label [[TAKEN:%.*]], label [[END:%.*]]
 ; CHECK:       taken:
 ; CHECK-NEXT:    ret i8 1
 ; CHECK:       end:
 ; CHECK-NEXT:    ret i8 0
 ;
-  %cmp = icmp eq i8 addrspace(1)* %ptr, null
+  %cmp = icmp eq ptr addrspace(1) %ptr, null
   br i1 %cmp, label %taken, label %end
 
 taken:
-  %cmp2 = icmp ne i8 addrspace(1)* %ptr, null
+  %cmp2 = icmp ne ptr addrspace(1) %ptr, null
   %res = select i1 %cmp2, i8 2, i8 1
   ret i8 %res
 

diff  --git a/llvm/test/Transforms/InstSimplify/select-inseltpoison.ll b/llvm/test/Transforms/InstSimplify/select-inseltpoison.ll
index 3dbb29f85d0f9..9511b6993ddad 100644
--- a/llvm/test/Transforms/InstSimplify/select-inseltpoison.ll
+++ b/llvm/test/Transforms/InstSimplify/select-inseltpoison.ll
@@ -581,15 +581,15 @@ define i64 @select_icmp_x_and_8_ne_0_y64_and_not_8(i32 %x, i64 %y) {
 
 ; Don't crash on a pointer or aggregate type.
 
-define i32* @select_icmp_pointers(i32* %x, i32* %y) {
+define ptr @select_icmp_pointers(ptr %x, ptr %y) {
 ; CHECK-LABEL: @select_icmp_pointers(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32* [[X:%.*]], null
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32* [[X]], i32* [[Y:%.*]]
-; CHECK-NEXT:    ret i32* [[SEL]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt ptr [[X:%.*]], null
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], ptr [[X]], ptr [[Y:%.*]]
+; CHECK-NEXT:    ret ptr [[SEL]]
 ;
-  %cmp = icmp slt i32* %x, null
-  %sel = select i1 %cmp, i32* %x, i32* %y
-  ret i32* %sel
+  %cmp = icmp slt ptr %x, null
+  %sel = select i1 %cmp, ptr %x, ptr %y
+  ret ptr %sel
 }
 
 ; If the condition is known, we don't need to select, but we're not
@@ -621,26 +621,26 @@ define i8 @do_not_assume_sel_cond(i1 %cond, i8 %x, i8 %y) {
   ret i8 %sel
 }
 
-define i32* @select_icmp_eq_0_gep_operand(i32* %base, i64 %n) {
+define ptr @select_icmp_eq_0_gep_operand(ptr %base, i64 %n) {
 ; CHECK-LABEL: @select_icmp_eq_0_gep_operand(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i32, i32* [[BASE:%.*]], i64 [[N:%.*]]
-; CHECK-NEXT:    ret i32* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 [[N:%.*]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
   %cond = icmp eq i64 %n, 0
-  %gep = getelementptr i32, i32* %base, i64 %n
-  %r = select i1 %cond, i32* %base, i32* %gep
-  ret i32* %r
+  %gep = getelementptr i32, ptr %base, i64 %n
+  %r = select i1 %cond, ptr %base, ptr %gep
+  ret ptr %r
 }
 
-define i32* @select_icmp_ne_0_gep_operand(i32* %base, i64 %n) {
+define ptr @select_icmp_ne_0_gep_operand(ptr %base, i64 %n) {
 ; CHECK-LABEL: @select_icmp_ne_0_gep_operand(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i32, i32* [[BASE:%.*]], i64 [[N:%.*]]
-; CHECK-NEXT:    ret i32* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 [[N:%.*]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
   %cond = icmp ne i64 %n, 0
-  %gep = getelementptr i32, i32* %base, i64 %n
-  %r = select i1 %cond, i32* %gep, i32* %base
-  ret i32* %r
+  %gep = getelementptr i32, ptr %base, i64 %n
+  %r = select i1 %cond, ptr %gep, ptr %base
+  ret ptr %r
 }
 
 define i1 @and_cmps(i32 %x) {
@@ -845,9 +845,9 @@ define i32 @false_undef_false_freeze(i1 %cond, i32 %x) {
 
 define <2 x i32> @false_undef_true_constextpr_vec(i1 %cond) {
 ; CHECK-LABEL: @false_undef_true_constextpr_vec(
-; CHECK-NEXT:    ret <2 x i32> <i32 ptrtoint (i32* @g to i32), i32 ptrtoint (i32* @g to i32)>
+; CHECK-NEXT:    ret <2 x i32> <i32 ptrtoint (ptr @g to i32), i32 ptrtoint (ptr @g to i32)>
 ;
-  %s = select i1 %cond, <2 x i32> <i32 undef, i32 ptrtoint (i32* @g to i32)>, <2 x i32> <i32 ptrtoint (i32* @g to i32), i32 undef>
+  %s = select i1 %cond, <2 x i32> <i32 undef, i32 ptrtoint (ptr @g to i32)>, <2 x i32> <i32 ptrtoint (ptr @g to i32), i32 undef>
   ret <2 x i32> %s
 }
 
@@ -855,7 +855,7 @@ define i32 @all_constant_true_undef() {
 ; CHECK-LABEL: @all_constant_true_undef(
 ; CHECK-NEXT:    ret i32 1
 ;
-  %s = select i1 ptrtoint (i32 ()* @all_constant_true_undef to i1), i32 undef, i32 1
+  %s = select i1 ptrtoint (ptr @all_constant_true_undef to i1), i32 undef, i32 1
   ret i32 %s
 }
 
@@ -863,7 +863,7 @@ define float @all_constant_false_undef() {
 ; CHECK-LABEL: @all_constant_false_undef(
 ; CHECK-NEXT:    ret float 1.000000e+00
 ;
-  %s = select i1 ptrtoint (float ()* @all_constant_false_undef to i1), float undef, float 1.0
+  %s = select i1 ptrtoint (ptr @all_constant_false_undef to i1), float undef, float 1.0
   ret float %s
 }
 
@@ -871,7 +871,7 @@ define <2 x i32> @all_constant_true_undef_vec() {
 ; CHECK-LABEL: @all_constant_true_undef_vec(
 ; CHECK-NEXT:    ret <2 x i32> <i32 1, i32 -1>
 ;
-  %s = select i1 ptrtoint (<2 x i32> ()* @all_constant_true_undef_vec to i1), <2 x i32> undef, <2 x i32> <i32 1, i32 -1>
+  %s = select i1 ptrtoint (ptr @all_constant_true_undef_vec to i1), <2 x i32> undef, <2 x i32> <i32 1, i32 -1>
   ret <2 x i32> %s
 }
 
@@ -879,45 +879,45 @@ define <2 x float> @all_constant_false_undef_vec() {
 ; CHECK-LABEL: @all_constant_false_undef_vec(
 ; CHECK-NEXT:    ret <2 x float> <float 1.000000e+00, float -1.000000e+00>
 ;
-  %s = select i1 ptrtoint (<2 x float> ()* @all_constant_false_undef_vec to i1), <2 x float> undef, <2 x float> <float 1.0, float -1.0>
+  %s = select i1 ptrtoint (ptr @all_constant_false_undef_vec to i1), <2 x float> undef, <2 x float> <float 1.0, float -1.0>
   ret <2 x float> %s
 }
 
 ; Negative tests. Don't fold if the non-undef operand is a constexpr.
 define i32 @all_constant_false_undef_true_constexpr() {
 ; CHECK-LABEL: @all_constant_false_undef_true_constexpr(
-; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (i32 ()* @all_constant_false_undef_true_constexpr to i1), i32 ptrtoint (i32 ()* @all_constant_false_undef_true_constexpr to i32), i32 undef
+; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (ptr @all_constant_false_undef_true_constexpr to i1), i32 ptrtoint (ptr @all_constant_false_undef_true_constexpr to i32), i32 undef
 ; CHECK-NEXT:    ret i32 [[S]]
 ;
-  %s = select i1 ptrtoint (i32 ()* @all_constant_false_undef_true_constexpr to i1), i32 ptrtoint (i32 ()* @all_constant_false_undef_true_constexpr to i32), i32 undef
+  %s = select i1 ptrtoint (ptr @all_constant_false_undef_true_constexpr to i1), i32 ptrtoint (ptr @all_constant_false_undef_true_constexpr to i32), i32 undef
   ret i32 %s
 }
 
 define i32 @all_constant_true_undef_false_constexpr() {
 ; CHECK-LABEL: @all_constant_true_undef_false_constexpr(
-; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (i32 ()* @all_constant_true_undef_false_constexpr to i1), i32 undef, i32 ptrtoint (i32 ()* @all_constant_true_undef_false_constexpr to i32)
+; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (ptr @all_constant_true_undef_false_constexpr to i1), i32 undef, i32 ptrtoint (ptr @all_constant_true_undef_false_constexpr to i32)
 ; CHECK-NEXT:    ret i32 [[S]]
 ;
-  %s = select i1 ptrtoint (i32 ()* @all_constant_true_undef_false_constexpr to i1), i32 undef, i32 ptrtoint (i32 ()* @all_constant_true_undef_false_constexpr to i32)
+  %s = select i1 ptrtoint (ptr @all_constant_true_undef_false_constexpr to i1), i32 undef, i32 ptrtoint (ptr @all_constant_true_undef_false_constexpr to i32)
   ret i32 %s
 }
 
 ; Negative tests. Don't fold if the non-undef operand is a vector containing a constexpr.
 define <2 x i32> @all_constant_false_undef_true_constexpr_vec() {
 ; CHECK-LABEL: @all_constant_false_undef_true_constexpr_vec(
-; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (<2 x i32> ()* @all_constant_false_undef_true_constexpr_vec to i1), <2 x i32> <i32 ptrtoint (<2 x i32> ()* @all_constant_false_undef_true_constexpr_vec to i32), i32 -1>, <2 x i32> undef
+; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (ptr @all_constant_false_undef_true_constexpr_vec to i1), <2 x i32> <i32 ptrtoint (ptr @all_constant_false_undef_true_constexpr_vec to i32), i32 -1>, <2 x i32> undef
 ; CHECK-NEXT:    ret <2 x i32> [[S]]
 ;
-  %s = select i1 ptrtoint (<2 x i32> ()* @all_constant_false_undef_true_constexpr_vec to i1), <2 x i32> <i32 ptrtoint (<2 x i32> ()* @all_constant_false_undef_true_constexpr_vec to i32), i32 -1>, <2 x i32> undef
+  %s = select i1 ptrtoint (ptr @all_constant_false_undef_true_constexpr_vec to i1), <2 x i32> <i32 ptrtoint (ptr @all_constant_false_undef_true_constexpr_vec to i32), i32 -1>, <2 x i32> undef
   ret <2 x i32> %s
 }
 
 define <2 x i32> @all_constant_true_undef_false_constexpr_vec() {
 ; CHECK-LABEL: @all_constant_true_undef_false_constexpr_vec(
-; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (<2 x i32> ()* @all_constant_true_undef_false_constexpr_vec to i1), <2 x i32> undef, <2 x i32> <i32 -1, i32 ptrtoint (<2 x i32> ()* @all_constant_true_undef_false_constexpr_vec to i32)>
+; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (ptr @all_constant_true_undef_false_constexpr_vec to i1), <2 x i32> undef, <2 x i32> <i32 -1, i32 ptrtoint (ptr @all_constant_true_undef_false_constexpr_vec to i32)>
 ; CHECK-NEXT:    ret <2 x i32> [[S]]
 ;
-  %s = select i1 ptrtoint (<2 x i32> ()* @all_constant_true_undef_false_constexpr_vec to i1), <2 x i32> undef, <2 x i32><i32 -1, i32 ptrtoint (<2 x i32> ()* @all_constant_true_undef_false_constexpr_vec to i32)>
+  %s = select i1 ptrtoint (ptr @all_constant_true_undef_false_constexpr_vec to i1), <2 x i32> undef, <2 x i32><i32 -1, i32 ptrtoint (ptr @all_constant_true_undef_false_constexpr_vec to i32)>
   ret <2 x i32> %s
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/select.ll b/llvm/test/Transforms/InstSimplify/select.ll
index 43a39741bc8b9..0472462eaff6e 100644
--- a/llvm/test/Transforms/InstSimplify/select.ll
+++ b/llvm/test/Transforms/InstSimplify/select.ll
@@ -581,15 +581,15 @@ define i64 @select_icmp_x_and_8_ne_0_y64_and_not_8(i32 %x, i64 %y) {
 
 ; Don't crash on a pointer or aggregate type.
 
-define i32* @select_icmp_pointers(i32* %x, i32* %y) {
+define ptr @select_icmp_pointers(ptr %x, ptr %y) {
 ; CHECK-LABEL: @select_icmp_pointers(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32* [[X:%.*]], null
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32* [[X]], i32* [[Y:%.*]]
-; CHECK-NEXT:    ret i32* [[SEL]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt ptr [[X:%.*]], null
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], ptr [[X]], ptr [[Y:%.*]]
+; CHECK-NEXT:    ret ptr [[SEL]]
 ;
-  %cmp = icmp slt i32* %x, null
-  %sel = select i1 %cmp, i32* %x, i32* %y
-  ret i32* %sel
+  %cmp = icmp slt ptr %x, null
+  %sel = select i1 %cmp, ptr %x, ptr %y
+  ret ptr %sel
 }
 
 ; If the condition is known, we don't need to select, but we're not
@@ -621,26 +621,26 @@ define i8 @do_not_assume_sel_cond(i1 %cond, i8 %x, i8 %y) {
   ret i8 %sel
 }
 
-define i32* @select_icmp_eq_0_gep_operand(i32* %base, i64 %n) {
+define ptr @select_icmp_eq_0_gep_operand(ptr %base, i64 %n) {
 ; CHECK-LABEL: @select_icmp_eq_0_gep_operand(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i32, i32* [[BASE:%.*]], i64 [[N:%.*]]
-; CHECK-NEXT:    ret i32* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 [[N:%.*]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
   %cond = icmp eq i64 %n, 0
-  %gep = getelementptr i32, i32* %base, i64 %n
-  %r = select i1 %cond, i32* %base, i32* %gep
-  ret i32* %r
+  %gep = getelementptr i32, ptr %base, i64 %n
+  %r = select i1 %cond, ptr %base, ptr %gep
+  ret ptr %r
 }
 
-define i32* @select_icmp_ne_0_gep_operand(i32* %base, i64 %n) {
+define ptr @select_icmp_ne_0_gep_operand(ptr %base, i64 %n) {
 ; CHECK-LABEL: @select_icmp_ne_0_gep_operand(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i32, i32* [[BASE:%.*]], i64 [[N:%.*]]
-; CHECK-NEXT:    ret i32* [[GEP]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i32, ptr [[BASE:%.*]], i64 [[N:%.*]]
+; CHECK-NEXT:    ret ptr [[GEP]]
 ;
   %cond = icmp ne i64 %n, 0
-  %gep = getelementptr i32, i32* %base, i64 %n
-  %r = select i1 %cond, i32* %gep, i32* %base
-  ret i32* %r
+  %gep = getelementptr i32, ptr %base, i64 %n
+  %r = select i1 %cond, ptr %gep, ptr %base
+  ret ptr %r
 }
 
 define i1 @and_cmps(i32 %x) {
@@ -845,9 +845,9 @@ define i32 @false_undef_false_freeze(i1 %cond, i32 %x) {
 
 define <2 x i32> @false_undef_true_constextpr_vec(i1 %cond) {
 ; CHECK-LABEL: @false_undef_true_constextpr_vec(
-; CHECK-NEXT:    ret <2 x i32> <i32 ptrtoint (i32* @g to i32), i32 ptrtoint (i32* @g to i32)>
+; CHECK-NEXT:    ret <2 x i32> <i32 ptrtoint (ptr @g to i32), i32 ptrtoint (ptr @g to i32)>
 ;
-  %s = select i1 %cond, <2 x i32> <i32 undef, i32 ptrtoint (i32* @g to i32)>, <2 x i32> <i32 ptrtoint (i32* @g to i32), i32 undef>
+  %s = select i1 %cond, <2 x i32> <i32 undef, i32 ptrtoint (ptr @g to i32)>, <2 x i32> <i32 ptrtoint (ptr @g to i32), i32 undef>
   ret <2 x i32> %s
 }
 
@@ -855,7 +855,7 @@ define i32 @all_constant_true_undef() {
 ; CHECK-LABEL: @all_constant_true_undef(
 ; CHECK-NEXT:    ret i32 1
 ;
-  %s = select i1 ptrtoint (i32 ()* @all_constant_true_undef to i1), i32 undef, i32 1
+  %s = select i1 ptrtoint (ptr @all_constant_true_undef to i1), i32 undef, i32 1
   ret i32 %s
 }
 
@@ -863,7 +863,7 @@ define float @all_constant_false_undef() {
 ; CHECK-LABEL: @all_constant_false_undef(
 ; CHECK-NEXT:    ret float 1.000000e+00
 ;
-  %s = select i1 ptrtoint (float ()* @all_constant_false_undef to i1), float undef, float 1.0
+  %s = select i1 ptrtoint (ptr @all_constant_false_undef to i1), float undef, float 1.0
   ret float %s
 }
 
@@ -871,7 +871,7 @@ define <2 x i32> @all_constant_true_undef_vec() {
 ; CHECK-LABEL: @all_constant_true_undef_vec(
 ; CHECK-NEXT:    ret <2 x i32> <i32 1, i32 -1>
 ;
-  %s = select i1 ptrtoint (<2 x i32> ()* @all_constant_true_undef_vec to i1), <2 x i32> undef, <2 x i32> <i32 1, i32 -1>
+  %s = select i1 ptrtoint (ptr @all_constant_true_undef_vec to i1), <2 x i32> undef, <2 x i32> <i32 1, i32 -1>
   ret <2 x i32> %s
 }
 
@@ -879,45 +879,45 @@ define <2 x float> @all_constant_false_undef_vec() {
 ; CHECK-LABEL: @all_constant_false_undef_vec(
 ; CHECK-NEXT:    ret <2 x float> <float 1.000000e+00, float -1.000000e+00>
 ;
-  %s = select i1 ptrtoint (<2 x float> ()* @all_constant_false_undef_vec to i1), <2 x float> undef, <2 x float> <float 1.0, float -1.0>
+  %s = select i1 ptrtoint (ptr @all_constant_false_undef_vec to i1), <2 x float> undef, <2 x float> <float 1.0, float -1.0>
   ret <2 x float> %s
 }
 
 ; Negative tests. Don't fold if the non-undef operand is a constexpr.
 define i32 @all_constant_false_undef_true_constexpr() {
 ; CHECK-LABEL: @all_constant_false_undef_true_constexpr(
-; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (i32 ()* @all_constant_false_undef_true_constexpr to i1), i32 ptrtoint (i32 ()* @all_constant_false_undef_true_constexpr to i32), i32 undef
+; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (ptr @all_constant_false_undef_true_constexpr to i1), i32 ptrtoint (ptr @all_constant_false_undef_true_constexpr to i32), i32 undef
 ; CHECK-NEXT:    ret i32 [[S]]
 ;
-  %s = select i1 ptrtoint (i32 ()* @all_constant_false_undef_true_constexpr to i1), i32 ptrtoint (i32 ()* @all_constant_false_undef_true_constexpr to i32), i32 undef
+  %s = select i1 ptrtoint (ptr @all_constant_false_undef_true_constexpr to i1), i32 ptrtoint (ptr @all_constant_false_undef_true_constexpr to i32), i32 undef
   ret i32 %s
 }
 
 define i32 @all_constant_true_undef_false_constexpr() {
 ; CHECK-LABEL: @all_constant_true_undef_false_constexpr(
-; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (i32 ()* @all_constant_true_undef_false_constexpr to i1), i32 undef, i32 ptrtoint (i32 ()* @all_constant_true_undef_false_constexpr to i32)
+; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (ptr @all_constant_true_undef_false_constexpr to i1), i32 undef, i32 ptrtoint (ptr @all_constant_true_undef_false_constexpr to i32)
 ; CHECK-NEXT:    ret i32 [[S]]
 ;
-  %s = select i1 ptrtoint (i32 ()* @all_constant_true_undef_false_constexpr to i1), i32 undef, i32 ptrtoint (i32 ()* @all_constant_true_undef_false_constexpr to i32)
+  %s = select i1 ptrtoint (ptr @all_constant_true_undef_false_constexpr to i1), i32 undef, i32 ptrtoint (ptr @all_constant_true_undef_false_constexpr to i32)
   ret i32 %s
 }
 
 ; Negative tests. Don't fold if the non-undef operand is a vector containing a constexpr.
 define <2 x i32> @all_constant_false_undef_true_constexpr_vec() {
 ; CHECK-LABEL: @all_constant_false_undef_true_constexpr_vec(
-; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (<2 x i32> ()* @all_constant_false_undef_true_constexpr_vec to i1), <2 x i32> <i32 ptrtoint (<2 x i32> ()* @all_constant_false_undef_true_constexpr_vec to i32), i32 -1>, <2 x i32> undef
+; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (ptr @all_constant_false_undef_true_constexpr_vec to i1), <2 x i32> <i32 ptrtoint (ptr @all_constant_false_undef_true_constexpr_vec to i32), i32 -1>, <2 x i32> undef
 ; CHECK-NEXT:    ret <2 x i32> [[S]]
 ;
-  %s = select i1 ptrtoint (<2 x i32> ()* @all_constant_false_undef_true_constexpr_vec to i1), <2 x i32> <i32 ptrtoint (<2 x i32> ()* @all_constant_false_undef_true_constexpr_vec to i32), i32 -1>, <2 x i32> undef
+  %s = select i1 ptrtoint (ptr @all_constant_false_undef_true_constexpr_vec to i1), <2 x i32> <i32 ptrtoint (ptr @all_constant_false_undef_true_constexpr_vec to i32), i32 -1>, <2 x i32> undef
   ret <2 x i32> %s
 }
 
 define <2 x i32> @all_constant_true_undef_false_constexpr_vec() {
 ; CHECK-LABEL: @all_constant_true_undef_false_constexpr_vec(
-; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (<2 x i32> ()* @all_constant_true_undef_false_constexpr_vec to i1), <2 x i32> undef, <2 x i32> <i32 -1, i32 ptrtoint (<2 x i32> ()* @all_constant_true_undef_false_constexpr_vec to i32)>
+; CHECK-NEXT:    [[S:%.*]] = select i1 ptrtoint (ptr @all_constant_true_undef_false_constexpr_vec to i1), <2 x i32> undef, <2 x i32> <i32 -1, i32 ptrtoint (ptr @all_constant_true_undef_false_constexpr_vec to i32)>
 ; CHECK-NEXT:    ret <2 x i32> [[S]]
 ;
-  %s = select i1 ptrtoint (<2 x i32> ()* @all_constant_true_undef_false_constexpr_vec to i1), <2 x i32> undef, <2 x i32><i32 -1, i32 ptrtoint (<2 x i32> ()* @all_constant_true_undef_false_constexpr_vec to i32)>
+  %s = select i1 ptrtoint (ptr @all_constant_true_undef_false_constexpr_vec to i1), <2 x i32> undef, <2 x i32><i32 -1, i32 ptrtoint (ptr @all_constant_true_undef_false_constexpr_vec to i32)>
   ret <2 x i32> %s
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/simplify-nested-bitcast.ll b/llvm/test/Transforms/InstSimplify/simplify-nested-bitcast.ll
index 725cd7deeec6b..ecdc21b4a572e 100644
--- a/llvm/test/Transforms/InstSimplify/simplify-nested-bitcast.ll
+++ b/llvm/test/Transforms/InstSimplify/simplify-nested-bitcast.ll
@@ -1,49 +1,48 @@
 ; RUN: opt -always-inline -S %s | FileCheck %s
-%0 = type { i64, i64, i8 addrspace(1)*, i8 addrspace(1)* }
-%__aaa_struct = type { { i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(1)* }, %0, [17 x i8], { i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(1)* }, %0, [18 x i8] }
+%0 = type { i64, i64, ptr addrspace(1), ptr addrspace(1) }
+%__aaa_struct = type { { ptr, i32, i32, ptr, ptr addrspace(1) }, %0, [17 x i8], { ptr, i32, i32, ptr, ptr addrspace(1) }, %0, [18 x i8] }
 %struct.__block_descriptor = type { i64, i64 }
-%struct.__block_literal_generic = type { i8*, i32, i32, i8*, %struct.__block_descriptor addrspace(1)* }
+%struct.__block_literal_generic = type { ptr, i32, i32, ptr, ptr addrspace(1) }
 
 @__aaa_struct_ptr = external addrspace(1) global %__aaa_struct
-@__aaa_const_init = constant %__aaa_struct { { i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(1)* } { i8** null, i32 1342177280, i32 0, i8* bitcast (i32 (i8 addrspace(4)*, i32 addrspace(1)*)* @bl0_block_invoke to i8*), %struct.__block_descriptor addrspace(1)* bitcast (%0 addrspace(1)* getelementptr inbounds (%__aaa_struct, %__aaa_struct addrspace(1)* @__aaa_struct_ptr, i32 0, i32 1) to %struct.__block_descriptor addrspace(1)*) }, %0 { i64 0, i64 32, i8 addrspace(1)* getelementptr inbounds (%__aaa_struct, %__aaa_struct addrspace(1)* @__aaa_struct_ptr, i32 0, i32 2, i32 0), i8 addrspace(1)* null }, [17 x i8] c"bl0_block_invoke\00", { i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(1)* } { i8** null, i32 1342177280, i32 0, i8* bitcast (i32 (i8 addrspace(4)*, i32 addrspace(1)*)* @__f1_block_invoke to i8*), %struct.__block_descriptor addrspace(1)* bitcast (%0 addrspace(1)* getelementptr inbounds (%__aaa_struct, %__aaa_struct addrspace(1)* @__aaa_struct_ptr, i32 0, i32 4) to %struct.__block_descriptor addrspace(1)*) }, %0 { i64 0, i64 32, i8 addrspace(1)* getelementptr inbounds (%__aaa_struct, %__aaa_struct addrspace(1)* @__aaa_struct_ptr, i32 0, i32 5, i32 0), i8 addrspace(1)* null }, [18 x i8] c"__f1_block_invoke\00" }
+@__aaa_const_init = constant %__aaa_struct { { ptr, i32, i32, ptr, ptr addrspace(1) } { ptr null, i32 1342177280, i32 0, ptr @bl0_block_invoke, ptr addrspace(1) getelementptr inbounds (%__aaa_struct, ptr addrspace(1) @__aaa_struct_ptr, i32 0, i32 1) }, %0 { i64 0, i64 32, ptr addrspace(1) getelementptr inbounds (%__aaa_struct, ptr addrspace(1) @__aaa_struct_ptr, i32 0, i32 2, i32 0), ptr addrspace(1) null }, [17 x i8] c"bl0_block_invoke\00", { ptr, i32, i32, ptr, ptr addrspace(1) } { ptr null, i32 1342177280, i32 0, ptr @__f1_block_invoke, ptr addrspace(1) getelementptr inbounds (%__aaa_struct, ptr addrspace(1) @__aaa_struct_ptr, i32 0, i32 4) }, %0 { i64 0, i64 32, ptr addrspace(1) getelementptr inbounds (%__aaa_struct, ptr addrspace(1) @__aaa_struct_ptr, i32 0, i32 5, i32 0), ptr addrspace(1) null }, [18 x i8] c"__f1_block_invoke\00" }
 
 ; Function Attrs: alwaysinline norecurse nounwind readonly
-define i32 @bl0_block_invoke(i8 addrspace(4)* nocapture readnone, i32 addrspace(1)* nocapture readonly) #0 {
+define i32 @bl0_block_invoke(ptr addrspace(4) nocapture readnone, ptr addrspace(1) nocapture readonly) #0 {
 entry:
-  %2 = load i32, i32 addrspace(1)* %1, align 4
+  %2 = load i32, ptr addrspace(1) %1, align 4
   %mul = shl nsw i32 %2, 1
   ret i32 %mul
 }
 
 ; Function Attrs: alwaysinline nounwind
-define i32 @f0(i32 addrspace(1)*, i32 (i32 addrspace(1)*) addrspace(4)*) #1 {
+define i32 @f0(ptr addrspace(1), ptr addrspace(4)) #1 {
 entry:
-  %block.literal = bitcast i32 (i32 addrspace(1)*) addrspace(4)* %1 to %struct.__block_literal_generic addrspace(4)*
-  %2 = getelementptr inbounds %struct.__block_literal_generic, %struct.__block_literal_generic addrspace(4)* %block.literal, i64 0, i32 3
-  %3 = bitcast i32 (i32 addrspace(1)*) addrspace(4)* %1 to i8 addrspace(4)*
-  %4 = bitcast i8* addrspace(4)* %2 to i32 (i8 addrspace(4)*, i32 addrspace(1)*)* addrspace(4)*
-  %5 = load i32 (i8 addrspace(4)*, i32 addrspace(1)*)*, i32 (i8 addrspace(4)*, i32 addrspace(1)*)* addrspace(4)* %4, align 8
-  %call = tail call i32 %5(i8 addrspace(4)* %3, i32 addrspace(1)* %0) #2
+  %2 = getelementptr inbounds %struct.__block_literal_generic, ptr addrspace(4) %1, i64 0, i32 3
+  %3 = bitcast ptr addrspace(4) %1 to ptr addrspace(4)
+  %4 = bitcast ptr addrspace(4) %2 to ptr addrspace(4)
+  %5 = load ptr, ptr addrspace(4) %4, align 8
+  %call = tail call i32 %5(ptr addrspace(4) %3, ptr addrspace(1) %0) #2
   ret i32 %call
 }
 
 ; CHECK-LABEL: define void @f1
-; CHECK: %1 = load i32 (i8 addrspace(4)*, i32 addrspace(1)*)*, i32 (i8 addrspace(4)*, i32 addrspace(1)*)* addrspace(4)* bitcast (i8* addrspace(4)* getelementptr inbounds (%__aaa_struct, %__aaa_struct addrspace(4)* addrspacecast (%__aaa_struct addrspace(1)* @__aaa_struct_ptr to %__aaa_struct addrspace(4)*), i64 0, i32 0, i32 3) to i32 (i8 addrspace(4)*, i32 addrspace(1)*)* addrspace(4)*), align 8
+; CHECK: %1 = load ptr, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @__aaa_struct_ptr to ptr addrspace(4)), i64 16), align 8
 
 ; Function Attrs: alwaysinline nounwind
-define void @f1(i32 addrspace(1)*) #1 {
+define void @f1(ptr addrspace(1)) #1 {
 entry:
-  %call = tail call i32 @f0(i32 addrspace(1)* %0, i32 (i32 addrspace(1)*) addrspace(4)* addrspacecast (i32 (i32 addrspace(1)*) addrspace(1)* bitcast (%__aaa_struct addrspace(1)* @__aaa_struct_ptr to i32 (i32 addrspace(1)*) addrspace(1)*) to i32 (i32 addrspace(1)*) addrspace(4)*)) #3
-  store i32 %call, i32 addrspace(1)* %0, align 4
-  %call1 = tail call i32 @f0(i32 addrspace(1)* %0, i32 (i32 addrspace(1)*) addrspace(4)* addrspacecast (i32 (i32 addrspace(1)*) addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(1)* } addrspace(1)* getelementptr inbounds (%__aaa_struct, %__aaa_struct addrspace(1)* @__aaa_struct_ptr, i32 0, i32 3) to i32 (i32 addrspace(1)*) addrspace(1)*) to i32 (i32 addrspace(1)*) addrspace(4)*)) #3
-  store i32 %call1, i32 addrspace(1)* %0, align 4
+  %call = tail call i32 @f0(ptr addrspace(1) %0, ptr addrspace(4) addrspacecast (ptr addrspace(1) @__aaa_struct_ptr to ptr addrspace(4))) #3
+  store i32 %call, ptr addrspace(1) %0, align 4
+  %call1 = tail call i32 @f0(ptr addrspace(1) %0, ptr addrspace(4) addrspacecast (ptr addrspace(1) getelementptr inbounds (%__aaa_struct, ptr addrspace(1) @__aaa_struct_ptr, i32 0, i32 3) to ptr addrspace(4))) #3
+  store i32 %call1, ptr addrspace(1) %0, align 4
   ret void
 }
 
 ; Function Attrs: alwaysinline norecurse nounwind readonly
-define i32 @__f1_block_invoke(i8 addrspace(4)* nocapture readnone, i32 addrspace(1)* nocapture readonly) #0 {
+define i32 @__f1_block_invoke(ptr addrspace(4) nocapture readnone, ptr addrspace(1) nocapture readonly) #0 {
 entry:
-  %2 = load i32, i32 addrspace(1)* %1, align 4
+  %2 = load i32, ptr addrspace(1) %1, align 4
   %add = add nsw i32 %2, 1
   ret i32 %add
 }

diff  --git a/llvm/test/Transforms/InstSimplify/vector_gep.ll b/llvm/test/Transforms/InstSimplify/vector_gep.ll
index 7a50854cdb327..ba0d978ed5b3c 100644
--- a/llvm/test/Transforms/InstSimplify/vector_gep.ll
+++ b/llvm/test/Transforms/InstSimplify/vector_gep.ll
@@ -2,105 +2,105 @@
 
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 
-declare void @helper(<2 x i8*>)
-define void @test(<2 x i8*> %a) {
-  %A = getelementptr i8, <2 x i8*> %a, <2 x i32> <i32 0, i32 0>
-  call void @helper(<2 x i8*> %A)
+declare void @helper(<2 x ptr>)
+define void @test(<2 x ptr> %a) {
+  %A = getelementptr i8, <2 x ptr> %a, <2 x i32> <i32 0, i32 0>
+  call void @helper(<2 x ptr> %A)
   ret void
 }
 
-define <4 x i8*> @test1(<4 x i8*> %a) {
-  %gep = getelementptr i8, <4 x i8*> %a, <4 x i32> zeroinitializer
-  ret <4 x i8*> %gep
+define <4 x ptr> @test1(<4 x ptr> %a) {
+  %gep = getelementptr i8, <4 x ptr> %a, <4 x i32> zeroinitializer
+  ret <4 x ptr> %gep
 
 ; CHECK-LABEL: @test1
-; CHECK-NEXT: ret <4 x i8*> %a
+; CHECK-NEXT: ret <4 x ptr> %a
 }
 
-define <4 x i8*> @test2(<4 x i8*> %a) {
-  %gep = getelementptr i8, <4 x i8*> %a
-  ret <4 x i8*> %gep
+define <4 x ptr> @test2(<4 x ptr> %a) {
+  %gep = getelementptr i8, <4 x ptr> %a
+  ret <4 x ptr> %gep
 
 ; CHECK-LABEL: @test2
-; CHECK-NEXT: ret <4 x i8*> %a
+; CHECK-NEXT: ret <4 x ptr> %a
 }
 
 %struct = type { double, float }
 
-define <4 x float*> @test3() {
-  %gep = getelementptr %struct, <4 x %struct*> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
-  ret <4 x float*> %gep
+define <4 x ptr> @test3() {
+  %gep = getelementptr %struct, <4 x ptr> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  ret <4 x ptr> %gep
 
 ; CHECK-LABEL: @test3
-; CHECK-NEXT: ret <4 x float*> undef
+; CHECK-NEXT: ret <4 x ptr> undef
 }
 
 %struct.empty = type { }
 
-define <4 x %struct.empty*> @test4(<4 x %struct.empty*> %a) {
-  %gep = getelementptr %struct.empty, <4 x %struct.empty*> %a, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
-  ret <4 x %struct.empty*> %gep
+define <4 x ptr> @test4(<4 x ptr> %a) {
+  %gep = getelementptr %struct.empty, <4 x ptr> %a, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  ret <4 x ptr> %gep
 
 ; CHECK-LABEL: @test4
-; CHECK-NEXT: ret <4 x %struct.empty*> %a
+; CHECK-NEXT: ret <4 x ptr> %a
 }
 
-define <4 x i8*> @test5() {
-  %c = inttoptr <4 x i64> <i64 1, i64 2, i64 3, i64 4> to <4 x i8*>
-  %gep = getelementptr i8, <4 x i8*> %c, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
-  ret <4 x i8*> %gep
+define <4 x ptr> @test5() {
+  %c = inttoptr <4 x i64> <i64 1, i64 2, i64 3, i64 4> to <4 x ptr>
+  %gep = getelementptr i8, <4 x ptr> %c, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  ret <4 x ptr> %gep
 
 ; CHECK-LABEL: @test5
-; CHECK-NEXT: ret <4 x i8*> getelementptr (i8, <4 x i8*> <i8* inttoptr (i64 1 to i8*), i8* inttoptr (i64 2 to i8*), i8* inttoptr (i64 3 to i8*), i8* inttoptr (i64 4 to i8*)>, <4 x i64> <i64 1, i64 1, i64 1, i64 1>)
+; CHECK-NEXT: ret <4 x ptr> getelementptr (i8, <4 x ptr> <ptr inttoptr (i64 1 to ptr), ptr inttoptr (i64 2 to ptr), ptr inttoptr (i64 3 to ptr), ptr inttoptr (i64 4 to ptr)>, <4 x i64> <i64 1, i64 1, i64 1, i64 1>)
 }
 
 @v = global [24 x [42 x [3 x i32]]] zeroinitializer, align 16
 
-define <16 x i32*> @test6() {
+define <16 x ptr> @test6() {
 ; CHECK-LABEL: @test6
-; CHECK-NEXT: ret <16 x i32*> getelementptr inbounds ([24 x [42 x [3 x i32]]], [24 x [42 x [3 x i32]]]* @v, <16 x i64> zeroinitializer, <16 x i64> zeroinitializer, <16 x i64> <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, <16 x i64> zeroinitializer)
-  %VectorGep = getelementptr [24 x [42 x [3 x i32]]], [24 x [42 x [3 x i32]]]* @v, i64 0, i64 0, <16 x i64> <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, i64 0
-  ret <16 x i32*> %VectorGep
+; CHECK-NEXT: ret <16 x ptr> getelementptr inbounds ([24 x [42 x [3 x i32]]], ptr @v, <16 x i64> zeroinitializer, <16 x i64> zeroinitializer, <16 x i64> <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, <16 x i64> zeroinitializer)
+  %VectorGep = getelementptr [24 x [42 x [3 x i32]]], ptr @v, i64 0, i64 0, <16 x i64> <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, i64 0
+  ret <16 x ptr> %VectorGep
 }
 
 ; PR32697
 ; CHECK-LABEL: tinkywinky(
-; CHECK-NEXT: ret <4 x i8*> undef
-define <4 x i8*> @tinkywinky() {
-  %patatino = getelementptr i8, i8* undef, <4 x i64> undef
-  ret <4 x i8*> %patatino
+; CHECK-NEXT: ret <4 x ptr> undef
+define <4 x ptr> @tinkywinky() {
+  %patatino = getelementptr i8, ptr undef, <4 x i64> undef
+  ret <4 x ptr> %patatino
 }
 
 ; PR32697
 ; CHECK-LABEL: dipsy(
-; CHECK-NEXT: ret <4 x i8*> undef
-define <4 x i8*> @dipsy() {
-  %patatino = getelementptr i8, <4 x i8 *> undef, <4 x i64> undef
-  ret <4 x i8*> %patatino
+; CHECK-NEXT: ret <4 x ptr> undef
+define <4 x ptr> @dipsy() {
+  %patatino = getelementptr i8, <4 x ptr> undef, <4 x i64> undef
+  ret <4 x ptr> %patatino
 }
 
 ; PR32697
 ; CHECK-LABEL: laalaa(
-; CHECK-NEXT: ret <4 x i8*> undef
-define <4 x i8*> @laalaa() {
-  %patatino = getelementptr i8, <4 x i8 *> undef, i64 undef
-  ret <4 x i8*> %patatino
+; CHECK-NEXT: ret <4 x ptr> undef
+define <4 x ptr> @laalaa() {
+  %patatino = getelementptr i8, <4 x ptr> undef, i64 undef
+  ret <4 x ptr> %patatino
 }
 
-define <2 x i8*> @zero_index(i8* %p) {
+define <2 x ptr> @zero_index(ptr %p) {
 ; CHECK-LABEL: @zero_index(
-; CHECK-NEXT:    %gep = getelementptr i8, i8* %p, <2 x i64> zeroinitializer
-; CHECK-NEXT:    ret <2 x i8*> %gep
+; CHECK-NEXT:    %gep = getelementptr i8, ptr %p, <2 x i64> zeroinitializer
+; CHECK-NEXT:    ret <2 x ptr> %gep
 ;
-  %gep = getelementptr i8, i8* %p, <2 x i64> zeroinitializer
-  ret <2 x i8*> %gep
+  %gep = getelementptr i8, ptr %p, <2 x i64> zeroinitializer
+  ret <2 x ptr> %gep
 }
 
-define <2 x {}*> @unsized({}* %p) {
+define <2 x ptr> @unsized(ptr %p) {
 ; CHECK-LABEL: @unsized(
-; CHECK-NEXT:    %gep = getelementptr {}, {}* %p, <2 x i64> undef
-; CHECK-NEXT:    ret <2 x {}*> %gep
+; CHECK-NEXT:    %gep = getelementptr {}, ptr %p, <2 x i64> undef
+; CHECK-NEXT:    ret <2 x ptr> %gep
 ;
-  %gep = getelementptr {}, {}* %p, <2 x i64> undef
-  ret <2 x {}*> %gep
+  %gep = getelementptr {}, ptr %p, <2 x i64> undef
+  ret <2 x ptr> %gep
 }

diff  --git a/llvm/test/Transforms/InstSimplify/vector_ptr_bitcast.ll b/llvm/test/Transforms/InstSimplify/vector_ptr_bitcast.ll
index 80142e1e530e7..262cdd4544203 100644
--- a/llvm/test/Transforms/InstSimplify/vector_ptr_bitcast.ll
+++ b/llvm/test/Transforms/InstSimplify/vector_ptr_bitcast.ll
@@ -2,25 +2,25 @@
 ; RUN: opt -S -passes=instsimplify < %s | FileCheck %s
 target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
 
-%mst = type { i8*, i8* }
-%mst2 = type { i32*, i32*, i32*, i32* }
+%mst = type { ptr, ptr }
+%mst2 = type { ptr, ptr, ptr, ptr }
 
-@a = private unnamed_addr constant %mst { i8* inttoptr (i64 -1 to i8*),
-  i8* inttoptr (i64 -1 to i8*)},
+@a = private unnamed_addr constant %mst { ptr inttoptr (i64 -1 to ptr),
+  ptr inttoptr (i64 -1 to ptr)},
   align 8
-@b = private unnamed_addr constant %mst2 { i32* inttoptr (i64 42 to i32*),
-  i32* inttoptr (i64 67 to i32*),
-  i32* inttoptr (i64 33 to i32*),
-  i32* inttoptr (i64 58 to i32*)},
+@b = private unnamed_addr constant %mst2 { ptr inttoptr (i64 42 to ptr),
+  ptr inttoptr (i64 67 to ptr),
+  ptr inttoptr (i64 33 to ptr),
+  ptr inttoptr (i64 58 to ptr)},
   align 8
 
 define i64 @fn() {
 ; CHECK-LABEL: @fn(
 ; CHECK-NEXT:    ret i64 -1
 ;
-  %x = load <2 x i8*>, <2 x i8*>* bitcast (%mst* @a to <2 x i8*>*), align 8
-  %b = extractelement <2 x i8*> %x, i32 0
-  %c = ptrtoint i8* %b to i64
+  %x = load <2 x ptr>, ptr @a, align 8
+  %b = extractelement <2 x ptr> %x, i32 0
+  %c = ptrtoint ptr %b to i64
   ret i64 %c
 }
 
@@ -28,11 +28,11 @@ define i64 @fn2() {
 ; CHECK-LABEL: @fn2(
 ; CHECK-NEXT:    ret i64 100
 ;
-  %x = load <4 x i32*>, <4 x i32*>* bitcast (%mst2* @b to <4 x i32*>*), align 8
-  %b = extractelement <4 x i32*> %x, i32 0
-  %c = extractelement <4 x i32*> %x, i32 3
-  %d = ptrtoint i32* %b to i64
-  %e = ptrtoint i32* %c to i64
+  %x = load <4 x ptr>, ptr @b, align 8
+  %b = extractelement <4 x ptr> %x, i32 0
+  %c = extractelement <4 x ptr> %x, i32 3
+  %d = ptrtoint ptr %b to i64
+  %e = ptrtoint ptr %c to i64
   %r = add i64 %d, %e
   ret i64 %r
 }

diff  --git a/llvm/test/Transforms/InstSimplify/vscale-inseltpoison.ll b/llvm/test/Transforms/InstSimplify/vscale-inseltpoison.ll
index bb241cbe8cc8f..38e39640b21da 100644
--- a/llvm/test/Transforms/InstSimplify/vscale-inseltpoison.ll
+++ b/llvm/test/Transforms/InstSimplify/vscale-inseltpoison.ll
@@ -154,47 +154,46 @@ define <vscale x 4 x float> @bitcast() {
 
 ; getelementptr
 
-define <vscale x 4 x i32*> @getelementptr_constant_foldable_1() {
+define <vscale x 4 x ptr> @getelementptr_constant_foldable_1() {
 ; CHECK-LABEL: @getelementptr_constant_foldable_1(
-; CHECK-NEXT:    ret <vscale x 4 x i32*> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 4 x ptr> zeroinitializer
 ;
-  %ptr = getelementptr i32, <vscale x 4 x i32*> zeroinitializer, <vscale x 4 x i64> undef
-  ret <vscale x 4 x i32*> %ptr
+  %ptr = getelementptr i32, <vscale x 4 x ptr> zeroinitializer, <vscale x 4 x i64> undef
+  ret <vscale x 4 x ptr> %ptr
 }
 
-define <vscale x 4 x <vscale x 4 x i32>*> @getelementptr_constant_foldable_2() {
+define <vscale x 4 x ptr> @getelementptr_constant_foldable_2() {
 ; CHECK-LABEL: @getelementptr_constant_foldable_2(
-; CHECK-NEXT:    ret <vscale x 4 x <vscale x 4 x i32>*> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 4 x ptr> zeroinitializer
 ;
-  %ptr = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* null, <vscale x 4 x i64> undef
-  ret <vscale x 4 x <vscale x 4 x i32>*> %ptr
+  %ptr = getelementptr <vscale x 4 x i32>, ptr null, <vscale x 4 x i64> undef
+  ret <vscale x 4 x ptr> %ptr
 }
 
 ; fold getelementptr P, 0 -> P.
-define <vscale x 4 x i32>* @getelementptr_constant_foldable_3() {
+define ptr @getelementptr_constant_foldable_3() {
 ; CHECK-LABEL: @getelementptr_constant_foldable_3(
-; CHECK-NEXT:    ret <vscale x 4 x i32>* null
+; CHECK-NEXT:    ret ptr null
 ;
-  %ptr = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* null, i64 0
-  ret <vscale x 4 x i32>* %ptr
+  ret ptr null
 }
 
-define <vscale x 4 x i32>* @getelementptr_not_constant_foldable(i64 %x) {
+define ptr @getelementptr_not_constant_foldable(i64 %x) {
 ; CHECK-LABEL: @getelementptr_not_constant_foldable(
-; CHECK-NEXT:    [[PTR:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* null, i64 [[X:%.*]]
-; CHECK-NEXT:    ret <vscale x 4 x i32>* [[PTR]]
+; CHECK-NEXT:    [[PTR:%.*]] = getelementptr <vscale x 4 x i32>, ptr null, i64 [[X:%.*]]
+; CHECK-NEXT:    ret ptr [[PTR]]
 ;
-  %ptr = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* null, i64 %x
-  ret <vscale x 4 x i32>* %ptr
+  %ptr = getelementptr <vscale x 4 x i32>, ptr null, i64 %x
+  ret ptr %ptr
 }
 
 ; Check GEP's result is known to be non-null.
-define i1 @getelementptr_check_non_null(<vscale x 16 x i8>* %ptr) {
+define i1 @getelementptr_check_non_null(ptr %ptr) {
 ; CHECK-LABEL: @getelementptr_check_non_null(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %x = getelementptr inbounds <vscale x 16 x i8>, <vscale x 16 x i8>* %ptr, i32 1
-  %cmp = icmp eq <vscale x 16 x i8>* %x, null
+  %x = getelementptr inbounds <vscale x 16 x i8>, ptr %ptr, i32 1
+  %cmp = icmp eq ptr %x, null
   ret i1 %cmp
 }
 

diff  --git a/llvm/test/Transforms/InstSimplify/vscale.ll b/llvm/test/Transforms/InstSimplify/vscale.ll
index 076b4ac4ee8da..f86b3fa487826 100644
--- a/llvm/test/Transforms/InstSimplify/vscale.ll
+++ b/llvm/test/Transforms/InstSimplify/vscale.ll
@@ -166,47 +166,46 @@ define <vscale x 4 x float> @bitcast() {
 
 ; getelementptr
 
-define <vscale x 4 x i32*> @getelementptr_constant_foldable_1() {
+define <vscale x 4 x ptr> @getelementptr_constant_foldable_1() {
 ; CHECK-LABEL: @getelementptr_constant_foldable_1(
-; CHECK-NEXT:    ret <vscale x 4 x i32*> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 4 x ptr> zeroinitializer
 ;
-  %ptr = getelementptr i32, <vscale x 4 x i32*> zeroinitializer, <vscale x 4 x i64> undef
-  ret <vscale x 4 x i32*> %ptr
+  %ptr = getelementptr i32, <vscale x 4 x ptr> zeroinitializer, <vscale x 4 x i64> undef
+  ret <vscale x 4 x ptr> %ptr
 }
 
-define <vscale x 4 x <vscale x 4 x i32>*> @getelementptr_constant_foldable_2() {
+define <vscale x 4 x ptr> @getelementptr_constant_foldable_2() {
 ; CHECK-LABEL: @getelementptr_constant_foldable_2(
-; CHECK-NEXT:    ret <vscale x 4 x <vscale x 4 x i32>*> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 4 x ptr> zeroinitializer
 ;
-  %ptr = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* null, <vscale x 4 x i64> undef
-  ret <vscale x 4 x <vscale x 4 x i32>*> %ptr
+  %ptr = getelementptr <vscale x 4 x i32>, ptr null, <vscale x 4 x i64> undef
+  ret <vscale x 4 x ptr> %ptr
 }
 
 ; fold getelementptr P, 0 -> P.
-define <vscale x 4 x i32>* @getelementptr_constant_foldable_3() {
+define ptr @getelementptr_constant_foldable_3() {
 ; CHECK-LABEL: @getelementptr_constant_foldable_3(
-; CHECK-NEXT:    ret <vscale x 4 x i32>* null
+; CHECK-NEXT:    ret ptr null
 ;
-  %ptr = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* null, i64 0
-  ret <vscale x 4 x i32>* %ptr
+  ret ptr null
 }
 
-define <vscale x 4 x i32>* @getelementptr_not_constant_foldable(i64 %x) {
+define ptr @getelementptr_not_constant_foldable(i64 %x) {
 ; CHECK-LABEL: @getelementptr_not_constant_foldable(
-; CHECK-NEXT:    [[PTR:%.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* null, i64 [[X:%.*]]
-; CHECK-NEXT:    ret <vscale x 4 x i32>* [[PTR]]
+; CHECK-NEXT:    [[PTR:%.*]] = getelementptr <vscale x 4 x i32>, ptr null, i64 [[X:%.*]]
+; CHECK-NEXT:    ret ptr [[PTR]]
 ;
-  %ptr = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* null, i64 %x
-  ret <vscale x 4 x i32>* %ptr
+  %ptr = getelementptr <vscale x 4 x i32>, ptr null, i64 %x
+  ret ptr %ptr
 }
 
 ; Check GEP's result is known to be non-null.
-define i1 @getelementptr_check_non_null(<vscale x 16 x i8>* %ptr) {
+define i1 @getelementptr_check_non_null(ptr %ptr) {
 ; CHECK-LABEL: @getelementptr_check_non_null(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %x = getelementptr inbounds <vscale x 16 x i8>, <vscale x 16 x i8>* %ptr, i32 1
-  %cmp = icmp eq <vscale x 16 x i8>* %x, null
+  %x = getelementptr inbounds <vscale x 16 x i8>, ptr %ptr, i32 1
+  %cmp = icmp eq ptr %x, null
   ret i1 %cmp
 }
 


        


More information about the llvm-commits mailing list