[llvm] 997d7d1 - InstCombine: Convert some tests to opaque pointers

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 1 20:09:05 PST 2022


Author: Matt Arsenault
Date: 2022-12-01T23:08:57-05:00
New Revision: 997d7d193149bdce5512405d7384f700c0cab39d

URL: https://github.com/llvm/llvm-project/commit/997d7d193149bdce5512405d7384f700c0cab39d
DIFF: https://github.com/llvm/llvm-project/commit/997d7d193149bdce5512405d7384f700c0cab39d.diff

LOG: InstCombine: Convert some tests to opaque pointers

Needed manual fixes:
  2008-01-06-VoidCast.ll
  alias.ll @0 type lost
  pr27703.ll totally confused
  pr44242.ll phi and update_test_checks
  pr44245.ll phi and update_test_checks

Needed re-running update_test_checks:
  2009-01-08-AlignAlloca.ll
  2009-02-20-InstCombine-SROA.ll
  addrspacecast.ll
  alloca-cast-debuginfo.ll
  alloca-in-non-alloca-as.ll
  alloca.ll
  icmp-gep.ll
  icmp-custom-dl.ll
  lifetime-no-null-opt.ll
  non-integral-pointers.ll
  pr33689_same_bitwidth.ll
  pr39908.ll
  scalable-cast-of-alloc.ll
  select-cmp-br.ll
  unpack-fca.ll

Converted to generated checks:
  2012-6-7-vselect-bitcast.ll

Added: 
    

Modified: 
    llvm/test/Transforms/InstCombine/2008-01-06-VoidCast.ll
    llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
    llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
    llvm/test/Transforms/InstCombine/2012-6-7-vselect-bitcast.ll
    llvm/test/Transforms/InstCombine/addrspacecast.ll
    llvm/test/Transforms/InstCombine/alias-recursion.ll
    llvm/test/Transforms/InstCombine/alloca-cast-debuginfo.ll
    llvm/test/Transforms/InstCombine/alloca-in-non-alloca-as.ll
    llvm/test/Transforms/InstCombine/alloca.ll
    llvm/test/Transforms/InstCombine/byval.ll
    llvm/test/Transforms/InstCombine/dbg-simplify-alloca-size.ll
    llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
    llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
    llvm/test/Transforms/InstCombine/icmp-gep.ll
    llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll
    llvm/test/Transforms/InstCombine/non-integral-pointers.ll
    llvm/test/Transforms/InstCombine/pr27703.ll
    llvm/test/Transforms/InstCombine/pr33689_same_bitwidth.ll
    llvm/test/Transforms/InstCombine/pr39908.ll
    llvm/test/Transforms/InstCombine/pr44242.ll
    llvm/test/Transforms/InstCombine/pr44245.ll
    llvm/test/Transforms/InstCombine/pr58901.ll
    llvm/test/Transforms/InstCombine/scalable-cast-of-alloc.ll
    llvm/test/Transforms/InstCombine/select-cmp-br.ll
    llvm/test/Transforms/InstCombine/unpack-fca.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/InstCombine/2008-01-06-VoidCast.ll b/llvm/test/Transforms/InstCombine/2008-01-06-VoidCast.ll
index 1c83e99f8563..d6268e5099c8 100644
--- a/llvm/test/Transforms/InstCombine/2008-01-06-VoidCast.ll
+++ b/llvm/test/Transforms/InstCombine/2008-01-06-VoidCast.ll
@@ -6,7 +6,7 @@ define void @f(i16 %y) {
 
 define i32 @g(i32 %y) {
 ; CHECK-LABEL: @g(
-; CHECK: call i32 bitcast
-  %x = call i32 bitcast (void (i16)* @f to i32 (i32)*)( i32 %y )		; <i32> [#uses=1]
+; CHECK-NEXT: %x = call i32 @f(i32 %y)		; <i32> [#uses=1]
+  %x = call i32 @f( i32 %y )		; <i32> [#uses=1]
   ret i32 %x
 }

diff  --git a/llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll b/llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
index 939cbf38dae7..00bce165efa2 100644
--- a/llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
+++ b/llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
@@ -4,39 +4,36 @@
 ; rdar://6480438
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "i386-apple-darwin9.6"
-	%struct.Key = type { { i32, i32 } }
-	%struct.anon = type <{ i8, [3 x i8], i32 }>
+  %struct.Key = type { { i32, i32 } }
+  %struct.anon = type <{ i8, [3 x i8], i32 }>
 
 define i32 @bar(i64 %key_token2) nounwind {
 ; CHECK-LABEL: @bar(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[IOSPEC:%.*]] = alloca [[STRUCT_KEY:%.*]], align 8
 ; CHECK-NEXT:    [[RET:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_KEY]], %struct.Key* [[IOSPEC]], i32 0, i32 0, i32 0
-; CHECK-NEXT:    store i32 0, i32* [[TMP0]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_KEY]], %struct.Key* [[IOSPEC]], i32 0, i32 0, i32 1
-; CHECK-NEXT:    store i32 0, i32* [[TMP1]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast %struct.Key* [[IOSPEC]] to i64*
-; CHECK-NEXT:    store i64 [[KEY_TOKEN2:%.*]], i64* [[TMP2]], align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 (...) @foo(%struct.Key* nonnull byval(%struct.Key) align 4 [[IOSPEC]], i32* nonnull [[RET]]) [[ATTR0:#.*]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[RET]], align 4
-; CHECK-NEXT:    ret i32 [[TMP4]]
+; CHECK-NEXT:    store i32 0, ptr [[IOSPEC]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds { i32, i32 }, ptr [[IOSPEC]], i32 0, i32 1
+; CHECK-NEXT:    store i32 0, ptr [[TMP0]], align 4
+; CHECK-NEXT:    store i64 [[KEY_TOKEN2:%.*]], ptr [[IOSPEC]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 (...) @foo(ptr nonnull byval([[STRUCT_KEY]]) align 4 [[IOSPEC]], ptr nonnull [[RET]]) #[[ATTR0:[0-9]+]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[RET]], align 4
+; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
 entry:
-  %iospec = alloca %struct.Key		; <%struct.Key*> [#uses=3]
-  %ret = alloca i32		; <i32*> [#uses=2]
+  %iospec = alloca %struct.Key		; <ptr> [#uses=3]
+  %ret = alloca i32		; <ptr> [#uses=2]
   %"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-  %0 = getelementptr %struct.Key, %struct.Key* %iospec, i32 0, i32 0		; <{ i32, i32 }*> [#uses=2]
-  %1 = getelementptr { i32, i32 }, { i32, i32 }* %0, i32 0, i32 0		; <i32*> [#uses=1]
-  store i32 0, i32* %1, align 4
-  %2 = getelementptr { i32, i32 }, { i32, i32 }* %0, i32 0, i32 1		; <i32*> [#uses=1]
-  store i32 0, i32* %2, align 4
-  %3 = getelementptr %struct.Key, %struct.Key* %iospec, i32 0, i32 0		; <{ i32, i32 }*> [#uses=1]
-  %4 = bitcast { i32, i32 }* %3 to i64*		; <i64*> [#uses=1]
-  store i64 %key_token2, i64* %4, align 4
-  %5 = call i32 (...) @foo(%struct.Key* byval(%struct.Key) align 4 %iospec, i32* %ret) nounwind		; <i32> [#uses=0]
-  %6 = load i32, i32* %ret, align 4		; <i32> [#uses=1]
-  ret i32 %6
+  %0 = getelementptr %struct.Key, ptr %iospec, i32 0, i32 0		; <ptr> [#uses=2]
+  %1 = getelementptr { i32, i32 }, ptr %0, i32 0, i32 0		; <ptr> [#uses=1]
+  store i32 0, ptr %1, align 4
+  %2 = getelementptr { i32, i32 }, ptr %0, i32 0, i32 1		; <ptr> [#uses=1]
+  store i32 0, ptr %2, align 4
+  %3 = getelementptr %struct.Key, ptr %iospec, i32 0, i32 0		; <ptr> [#uses=1]
+  store i64 %key_token2, ptr %3, align 4
+  %4 = call i32 (...) @foo(ptr byval(%struct.Key) align 4 %iospec, ptr %ret) nounwind		; <i32> [#uses=0]
+  %5 = load i32, ptr %ret, align 4		; <i32> [#uses=1]
+  ret i32 %5
 }
 
 declare i32 @foo(...)

diff  --git a/llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll b/llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
index 440fe961e750..4316018bb4e3 100644
--- a/llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
+++ b/llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
@@ -8,271 +8,239 @@
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "i386-apple-darwin9.6"
 
-%"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >" = type { i32* }
+%"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >" = type { ptr }
 %"struct.std::_Vector_base<int,std::allocator<int> >" = type { %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl" }
-%"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl" = type { i32*, i32*, i32* }
+%"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl" = type { ptr, ptr, ptr }
 %"struct.std::bidirectional_iterator_tag" = type <{ i8 }>
 %"struct.std::forward_iterator_tag" = type <{ i8 }>
 %"struct.std::input_iterator_tag" = type <{ i8 }>
 %"struct.std::random_access_iterator_tag" = type <{ i8 }>
 %"struct.std::vector<int,std::allocator<int> >" = type { %"struct.std::_Vector_base<int,std::allocator<int> >" }
 
-define i32* @_Z3fooRSt6vectorIiSaIiEE(%"struct.std::vector<int,std::allocator<int> >"* %X) {
+define ptr @_Z3fooRSt6vectorIiSaIiEE(ptr %X) {
 ; IC-LABEL: @_Z3fooRSt6vectorIiSaIiEE(
 ; IC-NEXT:  entry:
 ; IC-NEXT:    [[__FIRST_ADDR_I_I:%.*]] = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", align 8
 ; IC-NEXT:    [[__LAST_ADDR_I_I:%.*]] = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", align 8
 ; IC-NEXT:    [[TMP0:%.*]] = alloca i32, align 4
-; IC-NEXT:    store i32 42, i32* [[TMP0]], align 4
-; IC-NEXT:    [[TMP1:%.*]] = getelementptr %"struct.std::vector<int,std::allocator<int> >", %"struct.std::vector<int,std::allocator<int> >"* [[X:%.*]], i32 0, i32 0, i32 0, i32 1
-; IC-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 4
-; IC-NEXT:    [[TMP3:%.*]] = getelementptr %"struct.std::vector<int,std::allocator<int> >", %"struct.std::vector<int,std::allocator<int> >"* [[X]], i32 0, i32 0, i32 0, i32 0
-; IC-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 4
-; IC-NEXT:    [[TMP5:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    store i32* [[TMP4]], i32** [[TMP5]], align 8
-; IC-NEXT:    [[TMP6:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__LAST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    store i32* [[TMP2]], i32** [[TMP6]], align 8
-; IC-NEXT:    [[TMP7:%.*]] = ptrtoint i32* [[TMP2]] to i32
-; IC-NEXT:    [[TMP8:%.*]] = ptrtoint i32* [[TMP4]] to i32
-; IC-NEXT:    [[TMP9:%.*]] = sub i32 [[TMP7]], [[TMP8]]
-; IC-NEXT:    [[TMP10:%.*]] = ashr i32 [[TMP9]], 4
+; IC-NEXT:    store i32 42, ptr [[TMP0]], align 4
+; IC-NEXT:    [[TMP1:%.*]] = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl", ptr [[X:%.*]], i32 0, i32 1
+; IC-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 4
+; IC-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[X]], align 4
+; IC-NEXT:    store ptr [[TMP3]], ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    store ptr [[TMP2]], ptr [[__LAST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[TMP2]] to i32
+; IC-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[TMP3]] to i32
+; IC-NEXT:    [[TMP6:%.*]] = sub i32 [[TMP4]], [[TMP5]]
+; IC-NEXT:    [[TMP7:%.*]] = ashr i32 [[TMP6]], 4
 ; IC-NEXT:    br label [[BB12_I_I:%.*]]
 ; IC:       bb.i.i:
-; IC-NEXT:    [[TMP11:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[TMP11]], align 8
-; IC-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
-; IC-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
-; IC-NEXT:    [[TMP15:%.*]] = icmp eq i32 [[TMP13]], [[TMP14]]
-; IC-NEXT:    br i1 [[TMP15]], label [[BB1_I_I:%.*]], label [[BB2_I_I:%.*]]
+; IC-NEXT:    [[TMP8:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+; IC-NEXT:    [[TMP10:%.*]] = load i32, ptr [[TMP0]], align 4
+; IC-NEXT:    [[TMP11:%.*]] = icmp eq i32 [[TMP9]], [[TMP10]]
+; IC-NEXT:    br i1 [[TMP11]], label [[BB1_I_I:%.*]], label [[BB2_I_I:%.*]]
 ; IC:       bb1.i.i:
-; IC-NEXT:    [[TMP16:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[TMP16]], align 8
+; IC-NEXT:    [[TMP12:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT:%.*]]
 ; IC:       bb2.i.i:
-; IC-NEXT:    [[TMP18:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP19:%.*]] = load i32*, i32** [[TMP18]], align 8
-; IC-NEXT:    [[TMP20:%.*]] = getelementptr i32, i32* [[TMP19]], i32 1
-; IC-NEXT:    [[TMP21:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    store i32* [[TMP20]], i32** [[TMP21]], align 8
-; IC-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP20]], align 4
-; IC-NEXT:    [[TMP23:%.*]] = load i32, i32* [[TMP0]], align 4
-; IC-NEXT:    [[TMP24:%.*]] = icmp eq i32 [[TMP22]], [[TMP23]]
-; IC-NEXT:    br i1 [[TMP24]], label [[BB4_I_I:%.*]], label [[BB5_I_I:%.*]]
+; IC-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP14:%.*]] = getelementptr i32, ptr [[TMP13]], i32 1
+; IC-NEXT:    store ptr [[TMP14]], ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
+; IC-NEXT:    [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
+; IC-NEXT:    [[TMP17:%.*]] = icmp eq i32 [[TMP15]], [[TMP16]]
+; IC-NEXT:    br i1 [[TMP17]], label [[BB4_I_I:%.*]], label [[BB5_I_I:%.*]]
 ; IC:       bb4.i.i:
-; IC-NEXT:    [[TMP25:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
+; IC-NEXT:    [[TMP18:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       bb5.i.i:
-; IC-NEXT:    [[TMP27:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[TMP27]], align 8
-; IC-NEXT:    [[TMP29:%.*]] = getelementptr i32, i32* [[TMP28]], i32 1
-; IC-NEXT:    [[TMP30:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    store i32* [[TMP29]], i32** [[TMP30]], align 8
-; IC-NEXT:    [[TMP31:%.*]] = load i32, i32* [[TMP29]], align 4
-; IC-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP0]], align 4
-; IC-NEXT:    [[TMP33:%.*]] = icmp eq i32 [[TMP31]], [[TMP32]]
-; IC-NEXT:    br i1 [[TMP33]], label [[BB7_I_I:%.*]], label [[BB8_I_I:%.*]]
+; IC-NEXT:    [[TMP19:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP20:%.*]] = getelementptr i32, ptr [[TMP19]], i32 1
+; IC-NEXT:    store ptr [[TMP20]], ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
+; IC-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP0]], align 4
+; IC-NEXT:    [[TMP23:%.*]] = icmp eq i32 [[TMP21]], [[TMP22]]
+; IC-NEXT:    br i1 [[TMP23]], label [[BB7_I_I:%.*]], label [[BB8_I_I:%.*]]
 ; IC:       bb7.i.i:
-; IC-NEXT:    [[TMP34:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[TMP34]], align 8
+; IC-NEXT:    [[TMP24:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       bb8.i.i:
-; IC-NEXT:    [[TMP36:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP37:%.*]] = load i32*, i32** [[TMP36]], align 8
-; IC-NEXT:    [[TMP38:%.*]] = getelementptr i32, i32* [[TMP37]], i32 1
-; IC-NEXT:    [[TMP39:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    store i32* [[TMP38]], i32** [[TMP39]], align 8
-; IC-NEXT:    [[TMP40:%.*]] = load i32, i32* [[TMP38]], align 4
-; IC-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP0]], align 4
-; IC-NEXT:    [[TMP42:%.*]] = icmp eq i32 [[TMP40]], [[TMP41]]
-; IC-NEXT:    br i1 [[TMP42]], label [[BB10_I_I:%.*]], label [[BB11_I_I:%.*]]
+; IC-NEXT:    [[TMP25:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP26:%.*]] = getelementptr i32, ptr [[TMP25]], i32 1
+; IC-NEXT:    store ptr [[TMP26]], ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4
+; IC-NEXT:    [[TMP28:%.*]] = load i32, ptr [[TMP0]], align 4
+; IC-NEXT:    [[TMP29:%.*]] = icmp eq i32 [[TMP27]], [[TMP28]]
+; IC-NEXT:    br i1 [[TMP29]], label [[BB10_I_I:%.*]], label [[BB11_I_I:%.*]]
 ; IC:       bb10.i.i:
-; IC-NEXT:    [[TMP43:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP44:%.*]] = load i32*, i32** [[TMP43]], align 8
+; IC-NEXT:    [[TMP30:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       bb11.i.i:
-; IC-NEXT:    [[TMP45:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP46:%.*]] = load i32*, i32** [[TMP45]], align 8
-; IC-NEXT:    [[TMP47:%.*]] = getelementptr i32, i32* [[TMP46]], i32 1
-; IC-NEXT:    [[TMP48:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    store i32* [[TMP47]], i32** [[TMP48]], align 8
-; IC-NEXT:    [[TMP49:%.*]] = add i32 [[__TRIP_COUNT_0_I_I:%.*]], -1
+; IC-NEXT:    [[TMP31:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP32:%.*]] = getelementptr i32, ptr [[TMP31]], i32 1
+; IC-NEXT:    store ptr [[TMP32]], ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP33:%.*]] = add i32 [[__TRIP_COUNT_0_I_I:%.*]], -1
 ; IC-NEXT:    br label [[BB12_I_I]]
 ; IC:       bb12.i.i:
-; IC-NEXT:    [[__TRIP_COUNT_0_I_I]] = phi i32 [ [[TMP10]], [[ENTRY:%.*]] ], [ [[TMP49]], [[BB11_I_I]] ]
-; IC-NEXT:    [[TMP50:%.*]] = icmp sgt i32 [[__TRIP_COUNT_0_I_I]], 0
-; IC-NEXT:    br i1 [[TMP50]], label [[BB_I_I:%.*]], label [[BB13_I_I:%.*]]
+; IC-NEXT:    [[__TRIP_COUNT_0_I_I]] = phi i32 [ [[TMP7]], [[ENTRY:%.*]] ], [ [[TMP33]], [[BB11_I_I]] ]
+; IC-NEXT:    [[TMP34:%.*]] = icmp sgt i32 [[__TRIP_COUNT_0_I_I]], 0
+; IC-NEXT:    br i1 [[TMP34]], label [[BB_I_I:%.*]], label [[BB13_I_I:%.*]]
 ; IC:       bb13.i.i:
-; IC-NEXT:    [[TMP51:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__LAST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP52:%.*]] = load i32*, i32** [[TMP51]], align 8
-; IC-NEXT:    [[TMP53:%.*]] = ptrtoint i32* [[TMP52]] to i32
-; IC-NEXT:    [[TMP54:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP55:%.*]] = load i32*, i32** [[TMP54]], align 8
-; IC-NEXT:    [[TMP56:%.*]] = ptrtoint i32* [[TMP55]] to i32
-; IC-NEXT:    [[TMP57:%.*]] = sub i32 [[TMP53]], [[TMP56]]
-; IC-NEXT:    [[TMP58:%.*]] = ashr i32 [[TMP57]], 2
-; IC-NEXT:    switch i32 [[TMP58]], label [[BB26_I_I:%.*]] [
+; IC-NEXT:    [[TMP35:%.*]] = load ptr, ptr [[__LAST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP36:%.*]] = ptrtoint ptr [[TMP35]] to i32
+; IC-NEXT:    [[TMP37:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP37]] to i32
+; IC-NEXT:    [[TMP39:%.*]] = sub i32 [[TMP36]], [[TMP38]]
+; IC-NEXT:    [[TMP40:%.*]] = ashr i32 [[TMP39]], 2
+; IC-NEXT:    switch i32 [[TMP40]], label [[BB26_I_I:%.*]] [
 ; IC-NEXT:    i32 1, label [[BB22_I_I:%.*]]
 ; IC-NEXT:    i32 2, label [[BB18_I_I:%.*]]
 ; IC-NEXT:    i32 3, label [[BB14_I_I:%.*]]
 ; IC-NEXT:    ]
 ; IC:       bb14.i.i:
-; IC-NEXT:    [[TMP59:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP60:%.*]] = load i32*, i32** [[TMP59]], align 8
-; IC-NEXT:    [[TMP61:%.*]] = load i32, i32* [[TMP60]], align 4
-; IC-NEXT:    [[TMP62:%.*]] = load i32, i32* [[TMP0]], align 4
-; IC-NEXT:    [[TMP63:%.*]] = icmp eq i32 [[TMP61]], [[TMP62]]
-; IC-NEXT:    br i1 [[TMP63]], label [[BB16_I_I:%.*]], label [[BB17_I_I:%.*]]
+; IC-NEXT:    [[TMP41:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP42:%.*]] = load i32, ptr [[TMP41]], align 4
+; IC-NEXT:    [[TMP43:%.*]] = load i32, ptr [[TMP0]], align 4
+; IC-NEXT:    [[TMP44:%.*]] = icmp eq i32 [[TMP42]], [[TMP43]]
+; IC-NEXT:    br i1 [[TMP44]], label [[BB16_I_I:%.*]], label [[BB17_I_I:%.*]]
 ; IC:       bb16.i.i:
-; IC-NEXT:    [[TMP64:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP65:%.*]] = load i32*, i32** [[TMP64]], align 8
+; IC-NEXT:    [[TMP45:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       bb17.i.i:
-; IC-NEXT:    [[TMP66:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP67:%.*]] = load i32*, i32** [[TMP66]], align 8
-; IC-NEXT:    [[TMP68:%.*]] = getelementptr i32, i32* [[TMP67]], i32 1
-; IC-NEXT:    [[TMP69:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    store i32* [[TMP68]], i32** [[TMP69]], align 8
+; IC-NEXT:    [[TMP46:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP47:%.*]] = getelementptr i32, ptr [[TMP46]], i32 1
+; IC-NEXT:    store ptr [[TMP47]], ptr [[__FIRST_ADDR_I_I]], align 8
 ; IC-NEXT:    br label [[BB18_I_I]]
 ; IC:       bb18.i.i:
-; IC-NEXT:    [[TMP70:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP71:%.*]] = load i32*, i32** [[TMP70]], align 8
-; IC-NEXT:    [[TMP72:%.*]] = load i32, i32* [[TMP71]], align 4
-; IC-NEXT:    [[TMP73:%.*]] = load i32, i32* [[TMP0]], align 4
-; IC-NEXT:    [[TMP74:%.*]] = icmp eq i32 [[TMP72]], [[TMP73]]
-; IC-NEXT:    br i1 [[TMP74]], label [[BB20_I_I:%.*]], label [[BB21_I_I:%.*]]
+; IC-NEXT:    [[TMP48:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP49:%.*]] = load i32, ptr [[TMP48]], align 4
+; IC-NEXT:    [[TMP50:%.*]] = load i32, ptr [[TMP0]], align 4
+; IC-NEXT:    [[TMP51:%.*]] = icmp eq i32 [[TMP49]], [[TMP50]]
+; IC-NEXT:    br i1 [[TMP51]], label [[BB20_I_I:%.*]], label [[BB21_I_I:%.*]]
 ; IC:       bb20.i.i:
-; IC-NEXT:    [[TMP75:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP76:%.*]] = load i32*, i32** [[TMP75]], align 8
+; IC-NEXT:    [[TMP52:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       bb21.i.i:
-; IC-NEXT:    [[TMP77:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP78:%.*]] = load i32*, i32** [[TMP77]], align 8
-; IC-NEXT:    [[TMP79:%.*]] = getelementptr i32, i32* [[TMP78]], i32 1
-; IC-NEXT:    [[TMP80:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    store i32* [[TMP79]], i32** [[TMP80]], align 8
+; IC-NEXT:    [[TMP53:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP54:%.*]] = getelementptr i32, ptr [[TMP53]], i32 1
+; IC-NEXT:    store ptr [[TMP54]], ptr [[__FIRST_ADDR_I_I]], align 8
 ; IC-NEXT:    br label [[BB22_I_I]]
 ; IC:       bb22.i.i:
-; IC-NEXT:    [[TMP81:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP82:%.*]] = load i32*, i32** [[TMP81]], align 8
-; IC-NEXT:    [[TMP83:%.*]] = load i32, i32* [[TMP82]], align 4
-; IC-NEXT:    [[TMP84:%.*]] = load i32, i32* [[TMP0]], align 4
-; IC-NEXT:    [[TMP85:%.*]] = icmp eq i32 [[TMP83]], [[TMP84]]
-; IC-NEXT:    br i1 [[TMP85]], label [[BB24_I_I:%.*]], label [[BB25_I_I:%.*]]
+; IC-NEXT:    [[TMP55:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4
+; IC-NEXT:    [[TMP57:%.*]] = load i32, ptr [[TMP0]], align 4
+; IC-NEXT:    [[TMP58:%.*]] = icmp eq i32 [[TMP56]], [[TMP57]]
+; IC-NEXT:    br i1 [[TMP58]], label [[BB24_I_I:%.*]], label [[BB25_I_I:%.*]]
 ; IC:       bb24.i.i:
-; IC-NEXT:    [[TMP86:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP87:%.*]] = load i32*, i32** [[TMP86]], align 8
+; IC-NEXT:    [[TMP59:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       bb25.i.i:
-; IC-NEXT:    [[TMP88:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP89:%.*]] = load i32*, i32** [[TMP88]], align 8
-; IC-NEXT:    [[TMP90:%.*]] = getelementptr i32, i32* [[TMP89]], i32 1
-; IC-NEXT:    [[TMP91:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__FIRST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    store i32* [[TMP90]], i32** [[TMP91]], align 8
+; IC-NEXT:    [[TMP60:%.*]] = load ptr, ptr [[__FIRST_ADDR_I_I]], align 8
+; IC-NEXT:    [[TMP61:%.*]] = getelementptr i32, ptr [[TMP60]], i32 1
+; IC-NEXT:    store ptr [[TMP61]], ptr [[__FIRST_ADDR_I_I]], align 8
 ; IC-NEXT:    br label [[BB26_I_I]]
 ; IC:       bb26.i.i:
-; IC-NEXT:    [[TMP92:%.*]] = getelementptr inbounds %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* [[__LAST_ADDR_I_I]], i32 0, i32 0
-; IC-NEXT:    [[TMP93:%.*]] = load i32*, i32** [[TMP92]], align 8
+; IC-NEXT:    [[TMP62:%.*]] = load ptr, ptr [[__LAST_ADDR_I_I]], align 8
 ; IC-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC:       _ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit:
-; IC-NEXT:    [[DOT0_0_I_I:%.*]] = phi i32* [ [[TMP93]], [[BB26_I_I]] ], [ [[TMP87]], [[BB24_I_I]] ], [ [[TMP76]], [[BB20_I_I]] ], [ [[TMP65]], [[BB16_I_I]] ], [ [[TMP44]], [[BB10_I_I]] ], [ [[TMP35]], [[BB7_I_I]] ], [ [[TMP26]], [[BB4_I_I]] ], [ [[TMP17]], [[BB1_I_I]] ]
+; IC-NEXT:    [[DOT0_0_I_I:%.*]] = phi ptr [ [[TMP62]], [[BB26_I_I]] ], [ [[TMP59]], [[BB24_I_I]] ], [ [[TMP52]], [[BB20_I_I]] ], [ [[TMP45]], [[BB16_I_I]] ], [ [[TMP30]], [[BB10_I_I]] ], [ [[TMP24]], [[BB7_I_I]] ], [ [[TMP18]], [[BB4_I_I]] ], [ [[TMP12]], [[BB1_I_I]] ]
 ; IC-NEXT:    br label [[RETURN:%.*]]
 ; IC:       return:
-; IC-NEXT:    ret i32* [[DOT0_0_I_I]]
+; IC-NEXT:    ret ptr [[DOT0_0_I_I]]
 ;
 ; IC_SROA-LABEL: @_Z3fooRSt6vectorIiSaIiEE(
 ; IC_SROA-NEXT:  entry:
-; IC_SROA-NEXT:    [[TMP0:%.*]] = getelementptr %"struct.std::vector<int,std::allocator<int> >", %"struct.std::vector<int,std::allocator<int> >"* [[X:%.*]], i32 0, i32 0, i32 0, i32 1
-; IC_SROA-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[TMP0]], align 4
-; IC_SROA-NEXT:    [[TMP2:%.*]] = getelementptr %"struct.std::vector<int,std::allocator<int> >", %"struct.std::vector<int,std::allocator<int> >"* [[X]], i32 0, i32 0, i32 0, i32 0
-; IC_SROA-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 4
-; IC_SROA-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP1]] to i32
-; IC_SROA-NEXT:    [[TMP5:%.*]] = ptrtoint i32* [[TMP3]] to i32
-; IC_SROA-NEXT:    [[TMP6:%.*]] = sub i32 [[TMP4]], [[TMP5]]
-; IC_SROA-NEXT:    [[TMP7:%.*]] = ashr i32 [[TMP6]], 4
+; IC_SROA-NEXT:    [[TMP0:%.*]] = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl", ptr [[X:%.*]], i32 0, i32 1
+; IC_SROA-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 4
+; IC_SROA-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[X]], align 4
+; IC_SROA-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[TMP1]] to i32
+; IC_SROA-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[TMP2]] to i32
+; IC_SROA-NEXT:    [[TMP5:%.*]] = sub i32 [[TMP3]], [[TMP4]]
+; IC_SROA-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP5]], 4
 ; IC_SROA-NEXT:    br label [[BB12_I_I:%.*]]
 ; IC_SROA:       bb.i.i:
-; IC_SROA-NEXT:    [[TMP8:%.*]] = load i32, i32* [[__FIRST_ADDR_I_I_SROA_0_0:%.*]], align 4
-; IC_SROA-NEXT:    [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 42
-; IC_SROA-NEXT:    br i1 [[TMP9]], label [[BB1_I_I:%.*]], label [[BB2_I_I:%.*]]
+; IC_SROA-NEXT:    [[TMP7:%.*]] = load i32, ptr [[__FIRST_ADDR_I_I_SROA_0_0:%.*]], align 4
+; IC_SROA-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 42
+; IC_SROA-NEXT:    br i1 [[TMP8]], label [[BB1_I_I:%.*]], label [[BB2_I_I:%.*]]
 ; IC_SROA:       bb1.i.i:
 ; IC_SROA-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT:%.*]]
 ; IC_SROA:       bb2.i.i:
-; IC_SROA-NEXT:    [[TMP10:%.*]] = getelementptr i32, i32* [[__FIRST_ADDR_I_I_SROA_0_0]], i32 1
-; IC_SROA-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
-; IC_SROA-NEXT:    [[TMP12:%.*]] = icmp eq i32 [[TMP11]], 42
-; IC_SROA-NEXT:    br i1 [[TMP12]], label [[BB4_I_I:%.*]], label [[BB5_I_I:%.*]]
+; IC_SROA-NEXT:    [[TMP9:%.*]] = getelementptr i32, ptr [[__FIRST_ADDR_I_I_SROA_0_0]], i32 1
+; IC_SROA-NEXT:    [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
+; IC_SROA-NEXT:    [[TMP11:%.*]] = icmp eq i32 [[TMP10]], 42
+; IC_SROA-NEXT:    br i1 [[TMP11]], label [[BB4_I_I:%.*]], label [[BB5_I_I:%.*]]
 ; IC_SROA:       bb4.i.i:
 ; IC_SROA-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC_SROA:       bb5.i.i:
-; IC_SROA-NEXT:    [[TMP13:%.*]] = getelementptr i32, i32* [[TMP10]], i32 1
-; IC_SROA-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
-; IC_SROA-NEXT:    [[TMP15:%.*]] = icmp eq i32 [[TMP14]], 42
-; IC_SROA-NEXT:    br i1 [[TMP15]], label [[BB7_I_I:%.*]], label [[BB8_I_I:%.*]]
+; IC_SROA-NEXT:    [[TMP12:%.*]] = getelementptr i32, ptr [[TMP9]], i32 1
+; IC_SROA-NEXT:    [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
+; IC_SROA-NEXT:    [[TMP14:%.*]] = icmp eq i32 [[TMP13]], 42
+; IC_SROA-NEXT:    br i1 [[TMP14]], label [[BB7_I_I:%.*]], label [[BB8_I_I:%.*]]
 ; IC_SROA:       bb7.i.i:
 ; IC_SROA-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC_SROA:       bb8.i.i:
-; IC_SROA-NEXT:    [[TMP16:%.*]] = getelementptr i32, i32* [[TMP13]], i32 1
-; IC_SROA-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
-; IC_SROA-NEXT:    [[TMP18:%.*]] = icmp eq i32 [[TMP17]], 42
-; IC_SROA-NEXT:    br i1 [[TMP18]], label [[BB10_I_I:%.*]], label [[BB11_I_I:%.*]]
+; IC_SROA-NEXT:    [[TMP15:%.*]] = getelementptr i32, ptr [[TMP12]], i32 1
+; IC_SROA-NEXT:    [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
+; IC_SROA-NEXT:    [[TMP17:%.*]] = icmp eq i32 [[TMP16]], 42
+; IC_SROA-NEXT:    br i1 [[TMP17]], label [[BB10_I_I:%.*]], label [[BB11_I_I:%.*]]
 ; IC_SROA:       bb10.i.i:
 ; IC_SROA-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC_SROA:       bb11.i.i:
-; IC_SROA-NEXT:    [[TMP19:%.*]] = getelementptr i32, i32* [[TMP16]], i32 1
-; IC_SROA-NEXT:    [[TMP20:%.*]] = add i32 [[__TRIP_COUNT_0_I_I:%.*]], -1
+; IC_SROA-NEXT:    [[TMP18:%.*]] = getelementptr i32, ptr [[TMP15]], i32 1
+; IC_SROA-NEXT:    [[TMP19:%.*]] = add i32 [[__TRIP_COUNT_0_I_I:%.*]], -1
 ; IC_SROA-NEXT:    br label [[BB12_I_I]]
 ; IC_SROA:       bb12.i.i:
-; IC_SROA-NEXT:    [[__FIRST_ADDR_I_I_SROA_0_0]] = phi i32* [ [[TMP3]], [[ENTRY:%.*]] ], [ [[TMP19]], [[BB11_I_I]] ]
-; IC_SROA-NEXT:    [[__TRIP_COUNT_0_I_I]] = phi i32 [ [[TMP7]], [[ENTRY]] ], [ [[TMP20]], [[BB11_I_I]] ]
-; IC_SROA-NEXT:    [[TMP21:%.*]] = icmp sgt i32 [[__TRIP_COUNT_0_I_I]], 0
-; IC_SROA-NEXT:    br i1 [[TMP21]], label [[BB_I_I:%.*]], label [[BB13_I_I:%.*]]
+; IC_SROA-NEXT:    [[__FIRST_ADDR_I_I_SROA_0_0]] = phi ptr [ [[TMP2]], [[ENTRY:%.*]] ], [ [[TMP18]], [[BB11_I_I]] ]
+; IC_SROA-NEXT:    [[__TRIP_COUNT_0_I_I]] = phi i32 [ [[TMP6]], [[ENTRY]] ], [ [[TMP19]], [[BB11_I_I]] ]
+; IC_SROA-NEXT:    [[TMP20:%.*]] = icmp sgt i32 [[__TRIP_COUNT_0_I_I]], 0
+; IC_SROA-NEXT:    br i1 [[TMP20]], label [[BB_I_I:%.*]], label [[BB13_I_I:%.*]]
 ; IC_SROA:       bb13.i.i:
-; IC_SROA-NEXT:    [[TMP22:%.*]] = ptrtoint i32* [[TMP1]] to i32
-; IC_SROA-NEXT:    [[TMP23:%.*]] = ptrtoint i32* [[__FIRST_ADDR_I_I_SROA_0_0]] to i32
-; IC_SROA-NEXT:    [[TMP24:%.*]] = sub i32 [[TMP22]], [[TMP23]]
-; IC_SROA-NEXT:    [[TMP25:%.*]] = ashr i32 [[TMP24]], 2
-; IC_SROA-NEXT:    switch i32 [[TMP25]], label [[BB26_I_I:%.*]] [
+; IC_SROA-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP1]] to i32
+; IC_SROA-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[__FIRST_ADDR_I_I_SROA_0_0]] to i32
+; IC_SROA-NEXT:    [[TMP23:%.*]] = sub i32 [[TMP21]], [[TMP22]]
+; IC_SROA-NEXT:    [[TMP24:%.*]] = ashr i32 [[TMP23]], 2
+; IC_SROA-NEXT:    switch i32 [[TMP24]], label [[BB26_I_I:%.*]] [
 ; IC_SROA-NEXT:    i32 1, label [[BB22_I_I:%.*]]
 ; IC_SROA-NEXT:    i32 2, label [[BB18_I_I:%.*]]
 ; IC_SROA-NEXT:    i32 3, label [[BB14_I_I:%.*]]
 ; IC_SROA-NEXT:    ]
 ; IC_SROA:       bb14.i.i:
-; IC_SROA-NEXT:    [[TMP26:%.*]] = load i32, i32* [[__FIRST_ADDR_I_I_SROA_0_0]], align 4
-; IC_SROA-NEXT:    [[TMP27:%.*]] = icmp eq i32 [[TMP26]], 42
-; IC_SROA-NEXT:    br i1 [[TMP27]], label [[BB16_I_I:%.*]], label [[BB17_I_I:%.*]]
+; IC_SROA-NEXT:    [[TMP25:%.*]] = load i32, ptr [[__FIRST_ADDR_I_I_SROA_0_0]], align 4
+; IC_SROA-NEXT:    [[TMP26:%.*]] = icmp eq i32 [[TMP25]], 42
+; IC_SROA-NEXT:    br i1 [[TMP26]], label [[BB16_I_I:%.*]], label [[BB17_I_I:%.*]]
 ; IC_SROA:       bb16.i.i:
 ; IC_SROA-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC_SROA:       bb17.i.i:
-; IC_SROA-NEXT:    [[TMP28:%.*]] = getelementptr i32, i32* [[__FIRST_ADDR_I_I_SROA_0_0]], i32 1
+; IC_SROA-NEXT:    [[TMP27:%.*]] = getelementptr i32, ptr [[__FIRST_ADDR_I_I_SROA_0_0]], i32 1
 ; IC_SROA-NEXT:    br label [[BB18_I_I]]
 ; IC_SROA:       bb18.i.i:
-; IC_SROA-NEXT:    [[__FIRST_ADDR_I_I_SROA_0_1:%.*]] = phi i32* [ [[TMP28]], [[BB17_I_I]] ], [ [[__FIRST_ADDR_I_I_SROA_0_0]], [[BB13_I_I]] ]
-; IC_SROA-NEXT:    [[TMP29:%.*]] = load i32, i32* [[__FIRST_ADDR_I_I_SROA_0_1]], align 4
-; IC_SROA-NEXT:    [[TMP30:%.*]] = icmp eq i32 [[TMP29]], 42
-; IC_SROA-NEXT:    br i1 [[TMP30]], label [[BB20_I_I:%.*]], label [[BB21_I_I:%.*]]
+; IC_SROA-NEXT:    [[__FIRST_ADDR_I_I_SROA_0_1:%.*]] = phi ptr [ [[TMP27]], [[BB17_I_I]] ], [ [[__FIRST_ADDR_I_I_SROA_0_0]], [[BB13_I_I]] ]
+; IC_SROA-NEXT:    [[TMP28:%.*]] = load i32, ptr [[__FIRST_ADDR_I_I_SROA_0_1]], align 4
+; IC_SROA-NEXT:    [[TMP29:%.*]] = icmp eq i32 [[TMP28]], 42
+; IC_SROA-NEXT:    br i1 [[TMP29]], label [[BB20_I_I:%.*]], label [[BB21_I_I:%.*]]
 ; IC_SROA:       bb20.i.i:
 ; IC_SROA-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC_SROA:       bb21.i.i:
-; IC_SROA-NEXT:    [[TMP31:%.*]] = getelementptr i32, i32* [[__FIRST_ADDR_I_I_SROA_0_1]], i32 1
+; IC_SROA-NEXT:    [[TMP30:%.*]] = getelementptr i32, ptr [[__FIRST_ADDR_I_I_SROA_0_1]], i32 1
 ; IC_SROA-NEXT:    br label [[BB22_I_I]]
 ; IC_SROA:       bb22.i.i:
-; IC_SROA-NEXT:    [[__FIRST_ADDR_I_I_SROA_0_2:%.*]] = phi i32* [ [[TMP31]], [[BB21_I_I]] ], [ [[__FIRST_ADDR_I_I_SROA_0_0]], [[BB13_I_I]] ]
-; IC_SROA-NEXT:    [[TMP32:%.*]] = load i32, i32* [[__FIRST_ADDR_I_I_SROA_0_2]], align 4
-; IC_SROA-NEXT:    [[TMP33:%.*]] = icmp eq i32 [[TMP32]], 42
-; IC_SROA-NEXT:    br i1 [[TMP33]], label [[BB24_I_I:%.*]], label [[BB25_I_I:%.*]]
+; IC_SROA-NEXT:    [[__FIRST_ADDR_I_I_SROA_0_2:%.*]] = phi ptr [ [[TMP30]], [[BB21_I_I]] ], [ [[__FIRST_ADDR_I_I_SROA_0_0]], [[BB13_I_I]] ]
+; IC_SROA-NEXT:    [[TMP31:%.*]] = load i32, ptr [[__FIRST_ADDR_I_I_SROA_0_2]], align 4
+; IC_SROA-NEXT:    [[TMP32:%.*]] = icmp eq i32 [[TMP31]], 42
+; IC_SROA-NEXT:    br i1 [[TMP32]], label [[BB24_I_I:%.*]], label [[BB25_I_I:%.*]]
 ; IC_SROA:       bb24.i.i:
 ; IC_SROA-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC_SROA:       bb25.i.i:
-; IC_SROA-NEXT:    [[TMP34:%.*]] = getelementptr i32, i32* [[__FIRST_ADDR_I_I_SROA_0_2]], i32 1
+; IC_SROA-NEXT:    [[TMP33:%.*]] = getelementptr i32, ptr [[__FIRST_ADDR_I_I_SROA_0_2]], i32 1
 ; IC_SROA-NEXT:    br label [[BB26_I_I]]
 ; IC_SROA:       bb26.i.i:
 ; IC_SROA-NEXT:    br label [[_ZST4FINDIN9__GNU_CXX17__NORMAL_ITERATORIPIST6VECTORIISAIIEEEEIET_S7_S7_RKT0__EXIT]]
 ; IC_SROA:       _ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit:
-; IC_SROA-NEXT:    [[DOT0_0_I_I:%.*]] = phi i32* [ [[TMP1]], [[BB26_I_I]] ], [ [[__FIRST_ADDR_I_I_SROA_0_2]], [[BB24_I_I]] ], [ [[__FIRST_ADDR_I_I_SROA_0_1]], [[BB20_I_I]] ], [ [[__FIRST_ADDR_I_I_SROA_0_0]], [[BB16_I_I]] ], [ [[TMP16]], [[BB10_I_I]] ], [ [[TMP13]], [[BB7_I_I]] ], [ [[TMP10]], [[BB4_I_I]] ], [ [[__FIRST_ADDR_I_I_SROA_0_0]], [[BB1_I_I]] ]
+; IC_SROA-NEXT:    [[DOT0_0_I_I:%.*]] = phi ptr [ [[TMP1]], [[BB26_I_I]] ], [ [[__FIRST_ADDR_I_I_SROA_0_2]], [[BB24_I_I]] ], [ [[__FIRST_ADDR_I_I_SROA_0_1]], [[BB20_I_I]] ], [ [[__FIRST_ADDR_I_I_SROA_0_0]], [[BB16_I_I]] ], [ [[TMP15]], [[BB10_I_I]] ], [ [[TMP12]], [[BB7_I_I]] ], [ [[TMP9]], [[BB4_I_I]] ], [ [[__FIRST_ADDR_I_I_SROA_0_0]], [[BB1_I_I]] ]
 ; IC_SROA-NEXT:    br label [[RETURN:%.*]]
 ; IC_SROA:       return:
-; IC_SROA-NEXT:    ret i32* [[DOT0_0_I_I]]
+; IC_SROA-NEXT:    ret ptr [[DOT0_0_I_I]]
 ;
 entry:
   %0 = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"
@@ -285,254 +253,250 @@ entry:
   %3 = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"
   %4 = alloca i32
   %"alloca point" = bitcast i32 0 to i32
-  store i32 42, i32* %4, align 4
-  %5 = getelementptr %"struct.std::vector<int,std::allocator<int> >", %"struct.std::vector<int,std::allocator<int> >"* %X, i32 0, i32 0
-  %6 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >", %"struct.std::_Vector_base<int,std::allocator<int> >"* %5, i32 0, i32 0
-  %7 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl", %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"* %6, i32 0, i32 1
-  %8 = load i32*, i32** %7, align 4
-  %9 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %3, i32 0, i32 0
-  store i32* %8, i32** %9, align 4
-  %10 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %3, i32 0, i32 0
-  %11 = load i32*, i32** %10, align 4
-  %tmp2.i = ptrtoint i32* %11 to i32
-  %tmp1.i = inttoptr i32 %tmp2.i to i32*
-  %tmp3 = ptrtoint i32* %tmp1.i to i32
-  %tmp2 = inttoptr i32 %tmp3 to i32*
-  %12 = getelementptr %"struct.std::vector<int,std::allocator<int> >", %"struct.std::vector<int,std::allocator<int> >"* %X, i32 0, i32 0
-  %13 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >", %"struct.std::_Vector_base<int,std::allocator<int> >"* %12, i32 0, i32 0
-  %14 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl", %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"* %13, i32 0, i32 0
-  %15 = load i32*, i32** %14, align 4
-  %16 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %0, i32 0, i32 0
-  store i32* %15, i32** %16, align 4
-  %17 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %0, i32 0, i32 0
-  %18 = load i32*, i32** %17, align 4
-  %tmp2.i17 = ptrtoint i32* %18 to i32
-  %tmp1.i18 = inttoptr i32 %tmp2.i17 to i32*
-  %tmp8 = ptrtoint i32* %tmp1.i18 to i32
-  %tmp6 = inttoptr i32 %tmp8 to i32*
-  %19 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i, i32 0, i32 0
-  store i32* %tmp6, i32** %19
-  %20 = getelementptr %"struct.std::bidirectional_iterator_tag", %"struct.std::bidirectional_iterator_tag"* %1, i32 0, i32 0
-  %21 = load i8, i8* %20, align 1
+  store i32 42, ptr %4, align 4
+  %5 = getelementptr %"struct.std::vector<int,std::allocator<int> >", ptr %X, i32 0, i32 0
+  %6 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >", ptr %5, i32 0, i32 0
+  %7 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl", ptr %6, i32 0, i32 1
+  %8 = load ptr, ptr %7, align 4
+  %9 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %3, i32 0, i32 0
+  store ptr %8, ptr %9, align 4
+  %10 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %3, i32 0, i32 0
+  %11 = load ptr, ptr %10, align 4
+  %tmp2.i = ptrtoint ptr %11 to i32
+  %tmp1.i = inttoptr i32 %tmp2.i to ptr
+  %tmp3 = ptrtoint ptr %tmp1.i to i32
+  %tmp2 = inttoptr i32 %tmp3 to ptr
+  %12 = getelementptr %"struct.std::vector<int,std::allocator<int> >", ptr %X, i32 0, i32 0
+  %13 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >", ptr %12, i32 0, i32 0
+  %14 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl", ptr %13, i32 0, i32 0
+  %15 = load ptr, ptr %14, align 4
+  %16 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %0, i32 0, i32 0
+  store ptr %15, ptr %16, align 4
+  %17 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %0, i32 0, i32 0
+  %18 = load ptr, ptr %17, align 4
+  %tmp2.i17 = ptrtoint ptr %18 to i32
+  %tmp1.i18 = inttoptr i32 %tmp2.i17 to ptr
+  %tmp8 = ptrtoint ptr %tmp1.i18 to i32
+  %tmp6 = inttoptr i32 %tmp8 to ptr
+  %19 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i, i32 0, i32 0
+  store ptr %tmp6, ptr %19
+  %20 = load i8, ptr %1, align 1
+  %21 = or i8 %20, 0
   %22 = or i8 %21, 0
   %23 = or i8 %22, 0
-  %24 = or i8 %23, 0
-  %25 = getelementptr %"struct.std::bidirectional_iterator_tag", %"struct.std::bidirectional_iterator_tag"* %2, i32 0, i32 0
-  store i8 0, i8* %25, align 1
-  %elt.i = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i, i32 0, i32 0
-  %val.i = load i32*, i32** %elt.i
-  %tmp.i = bitcast %"struct.std::bidirectional_iterator_tag"* %unnamed_arg.i to i8*
-  %tmp9.i = bitcast %"struct.std::bidirectional_iterator_tag"* %2 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp.i, i8* %tmp9.i, i64 1, i1 false)
-  %26 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  store i32* %val.i, i32** %26
-  %27 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
-  store i32* %tmp2, i32** %27
-  %28 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
-  %29 = load i32*, i32** %28, align 4
-  %30 = ptrtoint i32* %29 to i32
-  %31 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %32 = load i32*, i32** %31, align 4
-  %33 = ptrtoint i32* %32 to i32
-  %34 = sub i32 %30, %33
-  %35 = ashr i32 %34, 2
-  %36 = ashr i32 %35, 2
+  store i8 0, ptr %2, align 1
+  %elt.i = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i, i32 0, i32 0
+  %val.i = load ptr, ptr %elt.i
+  call void @llvm.memcpy.p0.p0.i64(ptr %unnamed_arg.i, ptr %2, i64 1, i1 false)
+  %24 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  store ptr %val.i, ptr %24
+  %25 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__last_addr.i.i, i32 0, i32 0
+  store ptr %tmp2, ptr %25
+  %26 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__last_addr.i.i, i32 0, i32 0
+  %27 = load ptr, ptr %26, align 4
+  %28 = ptrtoint ptr %27 to i32
+  %29 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %30 = load ptr, ptr %29, align 4
+  %31 = ptrtoint ptr %30 to i32
+  %32 = sub i32 %28, %31
+  %33 = ashr i32 %32, 2
+  %34 = ashr i32 %33, 2
   br label %bb12.i.i
 
 bb.i.i:                                           ; preds = %bb12.i.i
-  %37 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %38 = load i32*, i32** %37, align 4
-  %39 = load i32, i32* %38, align 4
-  %40 = load i32, i32* %4, align 4
-  %41 = icmp eq i32 %39, %40
-  %42 = zext i1 %41 to i8
-  %toBool.i.i = icmp ne i8 %42, 0
+  %35 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %36 = load ptr, ptr %35, align 4
+  %37 = load i32, ptr %36, align 4
+  %38 = load i32, ptr %4, align 4
+  %39 = icmp eq i32 %37, %38
+  %40 = zext i1 %39 to i8
+  %toBool.i.i = icmp ne i8 %40, 0
   br i1 %toBool.i.i, label %bb1.i.i, label %bb2.i.i
 
 bb1.i.i:                                          ; preds = %bb.i.i
-  %43 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %44 = load i32*, i32** %43, align 4
+  %41 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %42 = load ptr, ptr %41, align 4
   br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
 
 bb2.i.i:                                          ; preds = %bb.i.i
-  %45 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %46 = load i32*, i32** %45, align 4
-  %47 = getelementptr i32, i32* %46, i64 1
-  %48 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  store i32* %47, i32** %48, align 4
-  %49 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %50 = load i32*, i32** %49, align 4
-  %51 = load i32, i32* %50, align 4
-  %52 = load i32, i32* %4, align 4
-  %53 = icmp eq i32 %51, %52
-  %54 = zext i1 %53 to i8
-  %toBool3.i.i = icmp ne i8 %54, 0
+  %43 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %44 = load ptr, ptr %43, align 4
+  %45 = getelementptr i32, ptr %44, i64 1
+  %46 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  store ptr %45, ptr %46, align 4
+  %47 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %48 = load ptr, ptr %47, align 4
+  %49 = load i32, ptr %48, align 4
+  %50 = load i32, ptr %4, align 4
+  %51 = icmp eq i32 %49, %50
+  %52 = zext i1 %51 to i8
+  %toBool3.i.i = icmp ne i8 %52, 0
   br i1 %toBool3.i.i, label %bb4.i.i, label %bb5.i.i
 
 bb4.i.i:                                          ; preds = %bb2.i.i
-  %55 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %56 = load i32*, i32** %55, align 4
+  %53 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %54 = load ptr, ptr %53, align 4
   br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
 
 bb5.i.i:                                          ; preds = %bb2.i.i
-  %57 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %58 = load i32*, i32** %57, align 4
-  %59 = getelementptr i32, i32* %58, i64 1
-  %60 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  store i32* %59, i32** %60, align 4
-  %61 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %62 = load i32*, i32** %61, align 4
-  %63 = load i32, i32* %62, align 4
-  %64 = load i32, i32* %4, align 4
-  %65 = icmp eq i32 %63, %64
-  %66 = zext i1 %65 to i8
-  %toBool6.i.i = icmp ne i8 %66, 0
+  %55 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %56 = load ptr, ptr %55, align 4
+  %57 = getelementptr i32, ptr %56, i64 1
+  %58 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  store ptr %57, ptr %58, align 4
+  %59 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %60 = load ptr, ptr %59, align 4
+  %61 = load i32, ptr %60, align 4
+  %62 = load i32, ptr %4, align 4
+  %63 = icmp eq i32 %61, %62
+  %64 = zext i1 %63 to i8
+  %toBool6.i.i = icmp ne i8 %64, 0
   br i1 %toBool6.i.i, label %bb7.i.i, label %bb8.i.i
 
 bb7.i.i:                                          ; preds = %bb5.i.i
-  %67 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %68 = load i32*, i32** %67, align 4
+  %65 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %66 = load ptr, ptr %65, align 4
   br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
 
 bb8.i.i:                                          ; preds = %bb5.i.i
-  %69 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %70 = load i32*, i32** %69, align 4
-  %71 = getelementptr i32, i32* %70, i64 1
-  %72 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  store i32* %71, i32** %72, align 4
-  %73 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %74 = load i32*, i32** %73, align 4
-  %75 = load i32, i32* %74, align 4
-  %76 = load i32, i32* %4, align 4
-  %77 = icmp eq i32 %75, %76
-  %78 = zext i1 %77 to i8
-  %toBool9.i.i = icmp ne i8 %78, 0
+  %67 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %68 = load ptr, ptr %67, align 4
+  %69 = getelementptr i32, ptr %68, i64 1
+  %70 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  store ptr %69, ptr %70, align 4
+  %71 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %72 = load ptr, ptr %71, align 4
+  %73 = load i32, ptr %72, align 4
+  %74 = load i32, ptr %4, align 4
+  %75 = icmp eq i32 %73, %74
+  %76 = zext i1 %75 to i8
+  %toBool9.i.i = icmp ne i8 %76, 0
   br i1 %toBool9.i.i, label %bb10.i.i, label %bb11.i.i
 
 bb10.i.i:                                         ; preds = %bb8.i.i
-  %79 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %80 = load i32*, i32** %79, align 4
+  %77 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %78 = load ptr, ptr %77, align 4
   br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
 
 bb11.i.i:                                         ; preds = %bb8.i.i
-  %81 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %82 = load i32*, i32** %81, align 4
-  %83 = getelementptr i32, i32* %82, i64 1
-  %84 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  store i32* %83, i32** %84, align 4
-  %85 = sub i32 %__trip_count.0.i.i, 1
+  %79 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %80 = load ptr, ptr %79, align 4
+  %81 = getelementptr i32, ptr %80, i64 1
+  %82 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  store ptr %81, ptr %82, align 4
+  %83 = sub i32 %__trip_count.0.i.i, 1
   br label %bb12.i.i
 
 bb12.i.i:                                         ; preds = %bb11.i.i, %entry
-  %__trip_count.0.i.i = phi i32 [ %36, %entry ], [ %85, %bb11.i.i ]
-  %86 = icmp sgt i32 %__trip_count.0.i.i, 0
-  br i1 %86, label %bb.i.i, label %bb13.i.i
+  %__trip_count.0.i.i = phi i32 [ %34, %entry ], [ %83, %bb11.i.i ]
+  %84 = icmp sgt i32 %__trip_count.0.i.i, 0
+  br i1 %84, label %bb.i.i, label %bb13.i.i
 
 bb13.i.i:                                         ; preds = %bb12.i.i
-  %87 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
-  %88 = load i32*, i32** %87, align 4
-  %89 = ptrtoint i32* %88 to i32
-  %90 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %91 = load i32*, i32** %90, align 4
-  %92 = ptrtoint i32* %91 to i32
-  %93 = sub i32 %89, %92
-  %94 = ashr i32 %93, 2
-  switch i32 %94, label %bb26.i.i [
+  %85 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__last_addr.i.i, i32 0, i32 0
+  %86 = load ptr, ptr %85, align 4
+  %87 = ptrtoint ptr %86 to i32
+  %88 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %89 = load ptr, ptr %88, align 4
+  %90 = ptrtoint ptr %89 to i32
+  %91 = sub i32 %87, %90
+  %92 = ashr i32 %91, 2
+  switch i32 %92, label %bb26.i.i [
   i32 1, label %bb22.i.i
   i32 2, label %bb18.i.i
   i32 3, label %bb14.i.i
   ]
 
 bb14.i.i:                                         ; preds = %bb13.i.i
-  %95 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %96 = load i32*, i32** %95, align 4
-  %97 = load i32, i32* %96, align 4
-  %98 = load i32, i32* %4, align 4
-  %99 = icmp eq i32 %97, %98
-  %100 = zext i1 %99 to i8
-  %toBool15.i.i = icmp ne i8 %100, 0
+  %93 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %94 = load ptr, ptr %93, align 4
+  %95 = load i32, ptr %94, align 4
+  %96 = load i32, ptr %4, align 4
+  %97 = icmp eq i32 %95, %96
+  %98 = zext i1 %97 to i8
+  %toBool15.i.i = icmp ne i8 %98, 0
   br i1 %toBool15.i.i, label %bb16.i.i, label %bb17.i.i
 
 bb16.i.i:                                         ; preds = %bb14.i.i
-  %101 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %102 = load i32*, i32** %101, align 4
+  %99 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %100 = load ptr, ptr %99, align 4
   br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
 
 bb17.i.i:                                         ; preds = %bb14.i.i
-  %103 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %104 = load i32*, i32** %103, align 4
-  %105 = getelementptr i32, i32* %104, i64 1
-  %106 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  store i32* %105, i32** %106, align 4
+  %101 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %102 = load ptr, ptr %101, align 4
+  %103 = getelementptr i32, ptr %102, i64 1
+  %104 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  store ptr %103, ptr %104, align 4
   br label %bb18.i.i
 
 bb18.i.i:                                         ; preds = %bb17.i.i, %bb13.i.i
-  %107 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %108 = load i32*, i32** %107, align 4
-  %109 = load i32, i32* %108, align 4
-  %110 = load i32, i32* %4, align 4
-  %111 = icmp eq i32 %109, %110
-  %112 = zext i1 %111 to i8
-  %toBool19.i.i = icmp ne i8 %112, 0
+  %105 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %106 = load ptr, ptr %105, align 4
+  %107 = load i32, ptr %106, align 4
+  %108 = load i32, ptr %4, align 4
+  %109 = icmp eq i32 %107, %108
+  %110 = zext i1 %109 to i8
+  %toBool19.i.i = icmp ne i8 %110, 0
   br i1 %toBool19.i.i, label %bb20.i.i, label %bb21.i.i
 
 bb20.i.i:                                         ; preds = %bb18.i.i
-  %113 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %114 = load i32*, i32** %113, align 4
+  %111 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %112 = load ptr, ptr %111, align 4
   br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
 
 bb21.i.i:                                         ; preds = %bb18.i.i
-  %115 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %116 = load i32*, i32** %115, align 4
-  %117 = getelementptr i32, i32* %116, i64 1
-  %118 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  store i32* %117, i32** %118, align 4
+  %113 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %114 = load ptr, ptr %113, align 4
+  %115 = getelementptr i32, ptr %114, i64 1
+  %116 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  store ptr %115, ptr %116, align 4
   br label %bb22.i.i
 
 bb22.i.i:                                         ; preds = %bb21.i.i, %bb13.i.i
-  %119 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %120 = load i32*, i32** %119, align 4
-  %121 = load i32, i32* %120, align 4
-  %122 = load i32, i32* %4, align 4
-  %123 = icmp eq i32 %121, %122
-  %124 = zext i1 %123 to i8
-  %toBool23.i.i = icmp ne i8 %124, 0
+  %117 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %118 = load ptr, ptr %117, align 4
+  %119 = load i32, ptr %118, align 4
+  %120 = load i32, ptr %4, align 4
+  %121 = icmp eq i32 %119, %120
+  %122 = zext i1 %121 to i8
+  %toBool23.i.i = icmp ne i8 %122, 0
   br i1 %toBool23.i.i, label %bb24.i.i, label %bb25.i.i
 
 bb24.i.i:                                         ; preds = %bb22.i.i
-  %125 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %126 = load i32*, i32** %125, align 4
+  %123 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %124 = load ptr, ptr %123, align 4
   br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
 
 bb25.i.i:                                         ; preds = %bb22.i.i
-  %127 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  %128 = load i32*, i32** %127, align 4
-  %129 = getelementptr i32, i32* %128, i64 1
-  %130 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
-  store i32* %129, i32** %130, align 4
+  %125 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  %126 = load ptr, ptr %125, align 4
+  %127 = getelementptr i32, ptr %126, i64 1
+  %128 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__first_addr.i.i, i32 0, i32 0
+  store ptr %127, ptr %128, align 4
   br label %bb26.i.i
 
 bb26.i.i:                                         ; preds = %bb25.i.i, %bb13.i.i
-  %131 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
-  %132 = load i32*, i32** %131, align 4
+  %129 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", ptr %__last_addr.i.i, i32 0, i32 0
+  %130 = load ptr, ptr %129, align 4
   br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
 
 _ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit: ; preds = %bb26.i.i, %bb24.i.i, %bb20.i.i, %bb16.i.i, %bb10.i.i, %bb7.i.i, %bb4.i.i, %bb1.i.i
-  %.0.0.i.i = phi i32* [ %132, %bb26.i.i ], [ %126, %bb24.i.i ], [ %114, %bb20.i.i ], [ %102, %bb16.i.i ], [ %80, %bb10.i.i ], [ %68, %bb7.i.i ], [ %56, %bb4.i.i ], [ %44, %bb1.i.i ]
-  %tmp2.i.i = ptrtoint i32* %.0.0.i.i to i32
-  %tmp1.i.i = inttoptr i32 %tmp2.i.i to i32*
-  %tmp4.i = ptrtoint i32* %tmp1.i.i to i32
-  %tmp3.i = inttoptr i32 %tmp4.i to i32*
-  %tmp8.i = ptrtoint i32* %tmp3.i to i32
-  %tmp6.i = inttoptr i32 %tmp8.i to i32*
-  %tmp12 = ptrtoint i32* %tmp6.i to i32
-  %tmp10 = inttoptr i32 %tmp12 to i32*
-  %tmp16 = ptrtoint i32* %tmp10 to i32
+  %.0.0.i.i = phi ptr [ %130, %bb26.i.i ], [ %124, %bb24.i.i ], [ %112, %bb20.i.i ], [ %100, %bb16.i.i ], [ %78, %bb10.i.i ], [ %66, %bb7.i.i ], [ %54, %bb4.i.i ], [ %42, %bb1.i.i ]
+  %tmp2.i.i = ptrtoint ptr %.0.0.i.i to i32
+  %tmp1.i.i = inttoptr i32 %tmp2.i.i to ptr
+  %tmp4.i = ptrtoint ptr %tmp1.i.i to i32
+  %tmp3.i = inttoptr i32 %tmp4.i to ptr
+  %tmp8.i = ptrtoint ptr %tmp3.i to i32
+  %tmp6.i = inttoptr i32 %tmp8.i to ptr
+  %tmp12 = ptrtoint ptr %tmp6.i to i32
+  %tmp10 = inttoptr i32 %tmp12 to ptr
+  %tmp16 = ptrtoint ptr %tmp10 to i32
   br label %return
 
 return:                                           ; preds = %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
-  %tmp14 = inttoptr i32 %tmp16 to i32*
-  ret i32* %tmp14
+  %tmp14 = inttoptr i32 %tmp16 to ptr
+  ret ptr %tmp14
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind

diff --git a/llvm/test/Transforms/InstCombine/2012-6-7-vselect-bitcast.ll b/llvm/test/Transforms/InstCombine/2012-6-7-vselect-bitcast.ll
index e8b1435e3db4..cc511e288a0e 100644
--- a/llvm/test/Transforms/InstCombine/2012-6-7-vselect-bitcast.ll
+++ b/llvm/test/Transforms/InstCombine/2012-6-7-vselect-bitcast.ll
@@ -1,11 +1,15 @@
-; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-; CHECK: bitcast
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
 
-define void @foo(<16 x i8> %a, <16 x i8> %b, <4 x i32>* %c) {
+define void @foo(<16 x i8> %a, <16 x i8> %b, ptr %c) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:    store <16 x i8> [[B:%.*]], ptr [[C:%.*]], align 4
+; CHECK-NEXT:    ret void
+;
   %aa = bitcast <16 x i8> %a to <4 x i32>
   %bb = bitcast <16 x i8> %b to <4 x i32>
   %select_v = select <4 x i1> zeroinitializer, <4 x i32> %aa, <4 x i32> %bb
-  store <4 x i32> %select_v, <4 x i32>* %c, align 4
+  store <4 x i32> %select_v, ptr %c, align 4
   ret void
 }
 

diff --git a/llvm/test/Transforms/InstCombine/addrspacecast.ll b/llvm/test/Transforms/InstCombine/addrspacecast.ll
index 9069db0bdad4..6b48cfb8fc4a 100644
--- a/llvm/test/Transforms/InstCombine/addrspacecast.ll
+++ b/llvm/test/Transforms/InstCombine/addrspacecast.ll
@@ -4,145 +4,133 @@
 target datalayout = "e-p:64:64:64-p1:32:32:32-p2:16:16:16-n8:16:32:64"
 
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1) nounwind
-declare void @llvm.memcpy.p0i8.p1i8.i32(i8*, i8 addrspace(1)*, i32, i1) nounwind
-declare void @llvm.memcpy.p0i8.p2i8.i32(i8*, i8 addrspace(2)*, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p1.i32(ptr, ptr addrspace(1), i32, i1) nounwind
+declare void @llvm.memcpy.p0.p2.i32(ptr, ptr addrspace(2), i32, i1) nounwind
 
 
-define i32* @combine_redundant_addrspacecast(i32 addrspace(1)* %x) nounwind {
+define ptr @combine_redundant_addrspacecast(ptr addrspace(1) %x) nounwind {
 ; CHECK-LABEL: @combine_redundant_addrspacecast(
-; CHECK-NEXT:    [[Z:%.*]] = addrspacecast i32 addrspace(1)* [[X:%.*]] to i32*
-; CHECK-NEXT:    ret i32* [[Z]]
+; CHECK-NEXT:    [[Z:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr
+; CHECK-NEXT:    ret ptr [[Z]]
 ;
-  %y = addrspacecast i32 addrspace(1)* %x to i32 addrspace(3)*
-  %z = addrspacecast i32 addrspace(3)* %y to i32*
-  ret i32* %z
+  %y = addrspacecast ptr addrspace(1) %x to ptr addrspace(3)
+  %z = addrspacecast ptr addrspace(3) %y to ptr
+  ret ptr %z
 }
 
-define <4 x i32*> @combine_redundant_addrspacecast_vector(<4 x i32 addrspace(1)*> %x) nounwind {
+define <4 x ptr> @combine_redundant_addrspacecast_vector(<4 x ptr addrspace(1)> %x) nounwind {
 ; CHECK-LABEL: @combine_redundant_addrspacecast_vector(
-; CHECK-NEXT:    [[Z:%.*]] = addrspacecast <4 x i32 addrspace(1)*> [[X:%.*]] to <4 x i32*>
-; CHECK-NEXT:    ret <4 x i32*> [[Z]]
+; CHECK-NEXT:    [[Z:%.*]] = addrspacecast <4 x ptr addrspace(1)> [[X:%.*]] to <4 x ptr>
+; CHECK-NEXT:    ret <4 x ptr> [[Z]]
 ;
-  %y = addrspacecast <4 x i32 addrspace(1)*> %x to <4 x i32 addrspace(3)*>
-  %z = addrspacecast <4 x i32 addrspace(3)*> %y to <4 x i32*>
-  ret <4 x i32*> %z
+  %y = addrspacecast <4 x ptr addrspace(1)> %x to <4 x ptr addrspace(3)>
+  %z = addrspacecast <4 x ptr addrspace(3)> %y to <4 x ptr>
+  ret <4 x ptr> %z
 }
 
-define float* @combine_redundant_addrspacecast_types(i32 addrspace(1)* %x) nounwind {
+define ptr @combine_redundant_addrspacecast_types(ptr addrspace(1) %x) nounwind {
 ; CHECK-LABEL: @combine_redundant_addrspacecast_types(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32 addrspace(1)* [[X:%.*]] to float addrspace(1)*
-; CHECK-NEXT:    [[Z:%.*]] = addrspacecast float addrspace(1)* [[TMP1]] to float*
-; CHECK-NEXT:    ret float* [[Z]]
+; CHECK-NEXT:    [[Z:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr
+; CHECK-NEXT:    ret ptr [[Z]]
 ;
-  %y = addrspacecast i32 addrspace(1)* %x to i32 addrspace(3)*
-  %z = addrspacecast i32 addrspace(3)* %y to float*
-  ret float* %z
+  %y = addrspacecast ptr addrspace(1) %x to ptr addrspace(3)
+  %z = addrspacecast ptr addrspace(3) %y to ptr
+  ret ptr %z
 }
 
-define <4 x float*> @combine_redundant_addrspacecast_types_vector(<4 x i32 addrspace(1)*> %x) nounwind {
+define <4 x ptr> @combine_redundant_addrspacecast_types_vector(<4 x ptr addrspace(1)> %x) nounwind {
 ; CHECK-LABEL: @combine_redundant_addrspacecast_types_vector(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32 addrspace(1)*> [[X:%.*]] to <4 x float addrspace(1)*>
-; CHECK-NEXT:    [[Z:%.*]] = addrspacecast <4 x float addrspace(1)*> [[TMP1]] to <4 x float*>
-; CHECK-NEXT:    ret <4 x float*> [[Z]]
+; CHECK-NEXT:    [[Z:%.*]] = addrspacecast <4 x ptr addrspace(1)> [[X:%.*]] to <4 x ptr>
+; CHECK-NEXT:    ret <4 x ptr> [[Z]]
 ;
-  %y = addrspacecast <4 x i32 addrspace(1)*> %x to <4 x i32 addrspace(3)*>
-  %z = addrspacecast <4 x i32 addrspace(3)*> %y to <4 x float*>
-  ret <4 x float*> %z
+  %y = addrspacecast <4 x ptr addrspace(1)> %x to <4 x ptr addrspace(3)>
+  %z = addrspacecast <4 x ptr addrspace(3)> %y to <4 x ptr>
+  ret <4 x ptr> %z
 }
 
-define float addrspace(2)* @combine_addrspacecast_bitcast_1(i32 addrspace(1)* %x) nounwind {
+define ptr addrspace(2) @combine_addrspacecast_bitcast_1(ptr addrspace(1) %x) nounwind {
 ; CHECK-LABEL: @combine_addrspacecast_bitcast_1(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32 addrspace(1)* [[X:%.*]] to float addrspace(1)*
-; CHECK-NEXT:    [[Z:%.*]] = addrspacecast float addrspace(1)* [[TMP1]] to float addrspace(2)*
-; CHECK-NEXT:    ret float addrspace(2)* [[Z]]
+; CHECK-NEXT:    [[Y:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr addrspace(2)
+; CHECK-NEXT:    ret ptr addrspace(2) [[Y]]
 ;
-  %y = addrspacecast i32 addrspace(1)* %x to i32 addrspace(2)*
-  %z = bitcast i32 addrspace(2)* %y to float addrspace(2)*
-  ret float addrspace(2)* %z
+  %y = addrspacecast ptr addrspace(1) %x to ptr addrspace(2)
+  ret ptr addrspace(2) %y
 }
 
-define i32 addrspace(2)* @combine_addrspacecast_bitcast_2(i32 addrspace(1)* %x) nounwind {
+define ptr addrspace(2) @combine_addrspacecast_bitcast_2(ptr addrspace(1) %x) nounwind {
 ; CHECK-LABEL: @combine_addrspacecast_bitcast_2(
-; CHECK-NEXT:    [[Z:%.*]] = addrspacecast i32 addrspace(1)* [[X:%.*]] to i32 addrspace(2)*
-; CHECK-NEXT:    ret i32 addrspace(2)* [[Z]]
+; CHECK-NEXT:    [[Y:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr addrspace(2)
+; CHECK-NEXT:    ret ptr addrspace(2) [[Y]]
 ;
-  %y = addrspacecast i32 addrspace(1)* %x to float addrspace(2)*
-  %z = bitcast float addrspace(2)* %y to i32 addrspace(2)*
-  ret i32 addrspace(2)* %z
+  %y = addrspacecast ptr addrspace(1) %x to ptr addrspace(2)
+  ret ptr addrspace(2) %y
 }
 
-define i32 addrspace(2)* @combine_bitcast_addrspacecast_1(i32 addrspace(1)* %x) nounwind {
+define ptr addrspace(2) @combine_bitcast_addrspacecast_1(ptr addrspace(1) %x) nounwind {
 ; CHECK-LABEL: @combine_bitcast_addrspacecast_1(
-; CHECK-NEXT:    [[Z:%.*]] = addrspacecast i32 addrspace(1)* [[X:%.*]] to i32 addrspace(2)*
-; CHECK-NEXT:    ret i32 addrspace(2)* [[Z]]
+; CHECK-NEXT:    [[Z:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr addrspace(2)
+; CHECK-NEXT:    ret ptr addrspace(2) [[Z]]
 ;
-  %y = bitcast i32 addrspace(1)* %x to i8 addrspace(1)*
-  %z = addrspacecast i8 addrspace(1)* %y to i32 addrspace(2)*
-  ret i32 addrspace(2)* %z
+  %z = addrspacecast ptr addrspace(1) %x to ptr addrspace(2)
+  ret ptr addrspace(2) %z
 }
 
-define float addrspace(2)* @combine_bitcast_addrspacecast_2(i32 addrspace(1)* %x) nounwind {
+define ptr addrspace(2) @combine_bitcast_addrspacecast_2(ptr addrspace(1) %x) nounwind {
 ; CHECK-LABEL: @combine_bitcast_addrspacecast_2(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32 addrspace(1)* [[X:%.*]] to float addrspace(1)*
-; CHECK-NEXT:    [[Z:%.*]] = addrspacecast float addrspace(1)* [[TMP1]] to float addrspace(2)*
-; CHECK-NEXT:    ret float addrspace(2)* [[Z]]
+; CHECK-NEXT:    [[Z:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr addrspace(2)
+; CHECK-NEXT:    ret ptr addrspace(2) [[Z]]
 ;
-  %y = bitcast i32 addrspace(1)* %x to i8 addrspace(1)*
-  %z = addrspacecast i8 addrspace(1)* %y to float addrspace(2)*
-  ret float addrspace(2)* %z
+  %z = addrspacecast ptr addrspace(1) %x to ptr addrspace(2)
+  ret ptr addrspace(2) %z
 }
 
-define float addrspace(2)* @combine_addrspacecast_types(i32 addrspace(1)* %x) nounwind {
+define ptr addrspace(2) @combine_addrspacecast_types(ptr addrspace(1) %x) nounwind {
 ; CHECK-LABEL: @combine_addrspacecast_types(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32 addrspace(1)* [[X:%.*]] to float addrspace(1)*
-; CHECK-NEXT:    [[Y:%.*]] = addrspacecast float addrspace(1)* [[TMP1]] to float addrspace(2)*
-; CHECK-NEXT:    ret float addrspace(2)* [[Y]]
+; CHECK-NEXT:    [[Y:%.*]] = addrspacecast ptr addrspace(1) [[X:%.*]] to ptr addrspace(2)
+; CHECK-NEXT:    ret ptr addrspace(2) [[Y]]
 ;
-  %y = addrspacecast i32 addrspace(1)* %x to float addrspace(2)*
-  ret float addrspace(2)* %y
+  %y = addrspacecast ptr addrspace(1) %x to ptr addrspace(2)
+  ret ptr addrspace(2) %y
 }
 
-define <4 x float addrspace(2)*> @combine_addrspacecast_types_vector(<4 x i32 addrspace(1)*> %x) nounwind {
+define <4 x ptr addrspace(2)> @combine_addrspacecast_types_vector(<4 x ptr addrspace(1)> %x) nounwind {
 ; CHECK-LABEL: @combine_addrspacecast_types_vector(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32 addrspace(1)*> [[X:%.*]] to <4 x float addrspace(1)*>
-; CHECK-NEXT:    [[Y:%.*]] = addrspacecast <4 x float addrspace(1)*> [[TMP1]] to <4 x float addrspace(2)*>
-; CHECK-NEXT:    ret <4 x float addrspace(2)*> [[Y]]
+; CHECK-NEXT:    [[Y:%.*]] = addrspacecast <4 x ptr addrspace(1)> [[X:%.*]] to <4 x ptr addrspace(2)>
+; CHECK-NEXT:    ret <4 x ptr addrspace(2)> [[Y]]
 ;
-  %y = addrspacecast <4 x i32 addrspace(1)*> %x to <4 x float addrspace(2)*>
-  ret <4 x float addrspace(2)*> %y
+  %y = addrspacecast <4 x ptr addrspace(1)> %x to <4 x ptr addrspace(2)>
+  ret <4 x ptr addrspace(2)> %y
 }
 
-define <vscale x 4 x float addrspace(2)*> @combine_addrspacecast_types_scalevector(<vscale x 4 x i32 addrspace(1)*> %x) nounwind {
+define <vscale x 4 x ptr addrspace(2)> @combine_addrspacecast_types_scalevector(<vscale x 4 x ptr addrspace(1)> %x) nounwind {
 ; CHECK-LABEL: @combine_addrspacecast_types_scalevector(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <vscale x 4 x i32 addrspace(1)*> [[X:%.*]] to <vscale x 4 x float addrspace(1)*>
-; CHECK-NEXT:    [[Y:%.*]] = addrspacecast <vscale x 4 x float addrspace(1)*> [[TMP1]] to <vscale x 4 x float addrspace(2)*>
-; CHECK-NEXT:    ret <vscale x 4 x float addrspace(2)*> [[Y]]
+; CHECK-NEXT:    [[Y:%.*]] = addrspacecast <vscale x 4 x ptr addrspace(1)> [[X:%.*]] to <vscale x 4 x ptr addrspace(2)>
+; CHECK-NEXT:    ret <vscale x 4 x ptr addrspace(2)> [[Y]]
 ;
-  %y = addrspacecast <vscale x 4 x i32 addrspace(1)*> %x to <vscale x 4 x float addrspace(2)*>
-  ret <vscale x 4 x float addrspace(2)*> %y
+  %y = addrspacecast <vscale x 4 x ptr addrspace(1)> %x to <vscale x 4 x ptr addrspace(2)>
+  ret <vscale x 4 x ptr addrspace(2)> %y
 }
 
 
-define i32 @canonicalize_addrspacecast([16 x i32] addrspace(1)* %arr) {
+define i32 @canonicalize_addrspacecast(ptr addrspace(1) %arr) {
 ; CHECK-LABEL: @canonicalize_addrspacecast(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [16 x i32], [16 x i32] addrspace(1)* [[ARR:%.*]], i32 0, i32 0
-; CHECK-NEXT:    [[P:%.*]] = addrspacecast i32 addrspace(1)* [[TMP1]] to i32*
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    [[P:%.*]] = addrspacecast ptr addrspace(1) [[ARR:%.*]] to ptr
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    ret i32 [[V]]
 ;
-  %p = addrspacecast [16 x i32] addrspace(1)* %arr to i32*
-  %v = load i32, i32* %p
+  %p = addrspacecast ptr addrspace(1) %arr to ptr
+  %v = load i32, ptr %p
   ret i32 %v
 }
 
 @const_array = addrspace(2) constant [60 x i8] [i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
-                                                i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
-                                                i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
-                                                i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
-                                                i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22 ]
+  i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
+  i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
+  i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22,
+  i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22, i8 2, i8 9, i8 4, i8 22 ]
 
-declare void @foo(i8*) nounwind
+declare void @foo(ptr) nounwind
 
 ; A copy from a constant addrspacecast'ed global
 define i32 @memcpy_addrspacecast() nounwind {
@@ -153,8 +141,8 @@ define i32 @memcpy_addrspacecast() nounwind {
 ; CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[I_INC:%.*]], [[LOOP_BODY]] ]
 ; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_INC:%.*]], [[LOOP_BODY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[I]] to i16
-; CHECK-NEXT:    [[PTR:%.*]] = getelementptr i8, i8 addrspace(2)* getelementptr inbounds ([60 x i8], [60 x i8] addrspace(2)* @const_array, i16 0, i16 4), i16 [[TMP0]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(2)* [[PTR]], align 1
+; CHECK-NEXT:    [[PTR:%.*]] = getelementptr i8, ptr addrspace(2) getelementptr inbounds ([60 x i8], ptr addrspace(2) @const_array, i16 0, i16 4), i16 [[TMP0]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(2) [[PTR]], align 1
 ; CHECK-NEXT:    [[EXT:%.*]] = zext i8 [[LOAD]] to i32
 ; CHECK-NEXT:    [[SUM_INC]] = add i32 [[SUM]], [[EXT]]
 ; CHECK-NEXT:    [[I_INC]] = add i32 [[I]], 1
@@ -165,14 +153,14 @@ define i32 @memcpy_addrspacecast() nounwind {
 ;
 entry:
   %alloca = alloca i8, i32 48
-  call void @llvm.memcpy.p0i8.p1i8.i32(i8* align 4 %alloca, i8 addrspace(1)* align 4 addrspacecast (i8 addrspace(2)* getelementptr inbounds ([60 x i8], [60 x i8] addrspace(2)* @const_array, i16 0, i16 4) to i8 addrspace(1)*), i32 48, i1 false) nounwind
+  call void @llvm.memcpy.p0.p1.i32(ptr align 4 %alloca, ptr addrspace(1) align 4 addrspacecast (ptr addrspace(2) getelementptr inbounds ([60 x i8], ptr addrspace(2) @const_array, i16 0, i16 4) to ptr addrspace(1)), i32 48, i1 false) nounwind
   br label %loop.body
 
 loop.body:
   %i = phi i32 [ 0, %entry ], [ %i.inc, %loop.body ]
   %sum = phi i32 [ 0, %entry ], [ %sum.inc, %loop.body]
-  %ptr = getelementptr i8, i8* %alloca, i32 %i
-  %load = load i8, i8* %ptr
+  %ptr = getelementptr i8, ptr %alloca, i32 %i
+  %load = load i8, ptr %ptr
   %ext = zext i8 %load to i32
   %sum.inc = add i32 %sum, %ext
   %i.inc = add i32 %i, 1
@@ -185,48 +173,48 @@ end:
 
 define void @constant_fold_null() #0 {
 ; CHECK-LABEL: @constant_fold_null(
-; CHECK-NEXT:    store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*), align 4
+; CHECK-NEXT:    store i32 7, ptr addrspace(4) addrspacecast (ptr addrspace(3) null to ptr addrspace(4)), align 4294967296
 ; CHECK-NEXT:    ret void
 ;
-  %cast = addrspacecast i32 addrspace(3)* null to i32 addrspace(4)*
-  store i32 7, i32 addrspace(4)* %cast
+  %cast = addrspacecast ptr addrspace(3) null to ptr addrspace(4)
+  store i32 7, ptr addrspace(4) %cast
   ret void
 }
 
-define i32 addrspace(4)* @constant_fold_undef() #0 {
+define ptr addrspace(4) @constant_fold_undef() #0 {
 ; CHECK-LABEL: @constant_fold_undef(
-; CHECK-NEXT:    ret i32 addrspace(4)* undef
+; CHECK-NEXT:    ret ptr addrspace(4) undef
 ;
-  %cast = addrspacecast i32 addrspace(3)* undef to i32 addrspace(4)*
-  ret i32 addrspace(4)* %cast
+  %cast = addrspacecast ptr addrspace(3) undef to ptr addrspace(4)
+  ret ptr addrspace(4) %cast
 }
 
-define <4 x i32 addrspace(4)*> @constant_fold_null_vector() #0 {
+define <4 x ptr addrspace(4)> @constant_fold_null_vector() #0 {
 ; CHECK-LABEL: @constant_fold_null_vector(
-; CHECK-NEXT:    ret <4 x i32 addrspace(4)*> addrspacecast (<4 x i32 addrspace(3)*> zeroinitializer to <4 x i32 addrspace(4)*>)
+; CHECK-NEXT:    ret <4 x ptr addrspace(4)> addrspacecast (<4 x ptr addrspace(3)> zeroinitializer to <4 x ptr addrspace(4)>)
 ;
-  %cast = addrspacecast <4 x i32 addrspace(3)*> zeroinitializer to <4 x i32 addrspace(4)*>
-  ret <4 x i32 addrspace(4)*> %cast
+  %cast = addrspacecast <4 x ptr addrspace(3)> zeroinitializer to <4 x ptr addrspace(4)>
+  ret <4 x ptr addrspace(4)> %cast
 }
 
 define void @constant_fold_inttoptr() #0 {
 ; CHECK-LABEL: @constant_fold_inttoptr(
-; CHECK-NEXT:    store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* inttoptr (i32 -1 to i32 addrspace(3)*) to i32 addrspace(4)*), align 4
+; CHECK-NEXT:    store i32 7, ptr addrspace(4) addrspacecast (ptr addrspace(3) inttoptr (i32 -1 to ptr addrspace(3)) to ptr addrspace(4)), align 4
 ; CHECK-NEXT:    ret void
 ;
-  %cast = addrspacecast i32 addrspace(3)* inttoptr (i32 -1 to i32 addrspace(3)*) to i32 addrspace(4)*
-  store i32 7, i32 addrspace(4)* %cast
+  %cast = addrspacecast ptr addrspace(3) inttoptr (i32 -1 to ptr addrspace(3)) to ptr addrspace(4)
+  store i32 7, ptr addrspace(4) %cast
   ret void
 }
 
 define void @constant_fold_gep_inttoptr() #0 {
 ; CHECK-LABEL: @constant_fold_gep_inttoptr(
-; CHECK-NEXT:    store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* inttoptr (i64 1274 to i32 addrspace(3)*) to i32 addrspace(4)*), align 4
+; CHECK-NEXT:    store i32 7, ptr addrspace(4) addrspacecast (ptr addrspace(3) inttoptr (i64 1274 to ptr addrspace(3)) to ptr addrspace(4)), align 4
 ; CHECK-NEXT:    ret void
 ;
-  %k = inttoptr i32 1234 to i32 addrspace(3)*
-  %gep = getelementptr i32, i32 addrspace(3)* %k, i32 10
-  %cast = addrspacecast i32 addrspace(3)* %gep to i32 addrspace(4)*
-  store i32 7, i32 addrspace(4)* %cast
+  %k = inttoptr i32 1234 to ptr addrspace(3)
+  %gep = getelementptr i32, ptr addrspace(3) %k, i32 10
+  %cast = addrspacecast ptr addrspace(3) %gep to ptr addrspace(4)
+  store i32 7, ptr addrspace(4) %cast
   ret void
 }

diff --git a/llvm/test/Transforms/InstCombine/alias-recursion.ll b/llvm/test/Transforms/InstCombine/alias-recursion.ll
index a99bdb58837f..e9f37ea9ec24 100644
--- a/llvm/test/Transforms/InstCombine/alias-recursion.ll
+++ b/llvm/test/Transforms/InstCombine/alias-recursion.ll
@@ -3,13 +3,13 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-pc-windows-msvc"
 
-%class.A = type { i32 (...)** }
+%class.A = type { ptr }
 
-@0 = constant [1 x i8*] zeroinitializer
+@0 = constant [1 x ptr] zeroinitializer
 
-@vtbl = alias i8*, getelementptr inbounds ([1 x i8*], [1 x i8*]* @0, i32 0, i32 0)
+@vtbl = alias ptr, ptr @0
 
-define i32 (%class.A*)* @test(i1 %c1, i1 %c2) {
+define ptr @test(i1 %c1, i1 %c2) {
 ; CHECK-LABEL: test
 entry:
   br i1 %c1, label %for.body, label %for.end
@@ -18,7 +18,7 @@ for.body:                                         ; preds = %for.body, %entry
   br i1 %c2, label %for.body, label %for.end
 
 for.end:                                          ; preds = %for.body, %entry
-  %A = phi i32 (%class.A*)** [ bitcast (i8** @vtbl to i32 (%class.A*)**), %for.body ], [ null, %entry ]
-  %B = load i32 (%class.A*)*, i32 (%class.A*)** %A
-  ret i32 (%class.A*)* %B
+  %A = phi ptr [ @vtbl, %for.body ], [ null, %entry ]
+  %B = load ptr, ptr %A
+  ret ptr %B
 }

diff --git a/llvm/test/Transforms/InstCombine/alloca-cast-debuginfo.ll b/llvm/test/Transforms/InstCombine/alloca-cast-debuginfo.ll
index 7f0e905263bf..6b7725098535 100644
--- a/llvm/test/Transforms/InstCombine/alloca-cast-debuginfo.ll
+++ b/llvm/test/Transforms/InstCombine/alloca-cast-debuginfo.ll
@@ -9,7 +9,7 @@
 ; struct Foo {
 ;   int x, y;
 ; };
-; void escape(const void*);
+; void escape(const ptr);
 ; void f(struct Foo *p) {
 ;   struct Foo local;
 ;   *(__int64 *)&local = *(__int64 *)p;
@@ -23,35 +23,28 @@ target triple = "x86_64-pc-windows-msvc19.11.25508"
 
 %struct.Foo = type { i32, i32 }
 
-define void @f(%struct.Foo* %p) !dbg !11 {
+define void @f(ptr %p) !dbg !11 {
 ; CHECK-LABEL: @f(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[LOCAL:%.*]] = alloca i64, align 8
-; CHECK-NEXT:    call void @llvm.dbg.declare(metadata i64* [[LOCAL]], [[META22:metadata !.*]], metadata !DIExpression()), [[DBG23:!dbg !.*]]
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.Foo* [[P:%.*]] to i64*, [[DBG24:!dbg !.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8, [[DBG24]], [[TBAA25:!tbaa !.*]]
-; CHECK-NEXT:    store i64 [[TMP1]], i64* [[LOCAL]], align 8, [[DBG29:!dbg !.*]], [[TBAA25]]
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i64* [[LOCAL]] to i8*, [[DBG30:!dbg !.*]]
-; CHECK-NEXT:    call void @escape(i8* nonnull [[TMP2]]), [[DBG31:!dbg !.*]]
-; CHECK-NEXT:    ret void, [[DBG32:!dbg !.*]]
+; CHECK-NEXT:    [[LOCAL:%.*]] = alloca [[STRUCT_FOO:%.*]], align 8
+; CHECK-NEXT:    call void @llvm.dbg.declare(metadata ptr [[LOCAL]], metadata [[META22:![0-9]+]], metadata !DIExpression()), !dbg [[DBG23:![0-9]+]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[P:%.*]], align 8, !dbg [[DBG24:![0-9]+]], !tbaa [[TBAA25:![0-9]+]]
+; CHECK-NEXT:    store i64 [[TMP0]], ptr [[LOCAL]], align 8, !dbg [[DBG29:![0-9]+]], !tbaa [[TBAA25]]
+; CHECK-NEXT:    call void @escape(ptr nonnull [[LOCAL]]), !dbg [[DBG30:![0-9]+]]
+; CHECK-NEXT:    ret void, !dbg [[DBG31:![0-9]+]]
 ;
 entry:
   %local = alloca %struct.Foo, align 4
-  %0 = bitcast %struct.Foo* %local to i8*, !dbg !24
-  call void @llvm.dbg.declare(metadata %struct.Foo* %local, metadata !22, metadata !DIExpression()), !dbg !25
-  %1 = bitcast %struct.Foo* %p to i64*, !dbg !26
-  %2 = load i64, i64* %1, align 8, !dbg !26, !tbaa !27
-  %3 = bitcast %struct.Foo* %local to i64*, !dbg !31
-  store i64 %2, i64* %3, align 4, !dbg !32, !tbaa !27
-  %4 = bitcast %struct.Foo* %local to i8*, !dbg !33
-  call void @escape(i8* %4), !dbg !34
-  %5 = bitcast %struct.Foo* %local to i8*, !dbg !35
+  call void @llvm.dbg.declare(metadata ptr %local, metadata !22, metadata !DIExpression()), !dbg !25
+  %0 = load i64, ptr %p, align 8, !dbg !26, !tbaa !27
+  store i64 %0, ptr %local, align 4, !dbg !32, !tbaa !27
+  call void @escape(ptr %local), !dbg !34
   ret void, !dbg !35
 }
 
 declare void @llvm.dbg.declare(metadata, metadata, metadata)
 
-declare void @escape(i8*)
+declare void @escape(ptr)
 
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!6, !7, !8, !9}

diff --git a/llvm/test/Transforms/InstCombine/alloca-in-non-alloca-as.ll b/llvm/test/Transforms/InstCombine/alloca-in-non-alloca-as.ll
index 90cb95b025ac..3a8910644327 100644
--- a/llvm/test/Transforms/InstCombine/alloca-in-non-alloca-as.ll
+++ b/llvm/test/Transforms/InstCombine/alloca-in-non-alloca-as.ll
@@ -6,39 +6,35 @@
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
 target triple = "amdgcn-amd-amdhsa"
 
-declare void @use(i8*, i32**)
+declare void @use(ptr, ptr)
 
-define weak amdgpu_kernel void @__omp_offloading_802_ea0109_main_l8(i32* %a) {
+define weak amdgpu_kernel void @__omp_offloading_802_ea0109_main_l8(ptr %a) {
 ; CHECK-LABEL: @__omp_offloading_802_ea0109_main_l8(
 ; CHECK-NEXT:  .master:
-; CHECK-NEXT:    [[TMP0:%.*]] = alloca i32*, align 1
-; CHECK-NEXT:    [[DOTSUB:%.*]] = bitcast i32** [[TMP0]] to i8*
-; CHECK-NEXT:    call void @use(i8* [[DOTSUB]], i32** [[TMP0]])
+; CHECK-NEXT:    [[TMP0:%.*]] = alloca [8 x i8], align 1
+; CHECK-NEXT:    call void @use(ptr [[TMP0]], ptr [[TMP0]])
 ; CHECK-NEXT:    ret void
 ;
 .master:
   %0 = alloca i8, i64 8, align 1
-  %a_on_stack = bitcast i8* %0 to i32**
-  store i32* undef, i32** %a_on_stack, align 8
-  call void @use(i8* %0, i32** %a_on_stack)
+  store ptr undef, ptr %0, align 8
+  call void @use(ptr %0, ptr %0)
   ret void
 }
 
 %struct.widget = type { [8 x i8] }
 
-define void @spam(i64* %arg1) {
+define void @spam(ptr %arg1) {
 ; CHECK-LABEL: @spam(
 ; CHECK-NEXT:  bb:
 ; CHECK-NEXT:    [[ALLOCA1:%.*]] = alloca [0 x [30 x %struct.widget]], align 16
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds [0 x [30 x %struct.widget]], [0 x [30 x %struct.widget]]* [[ALLOCA1]], i64 0, i64 0, i64 0
-; CHECK-NEXT:    call void @zot(%struct.widget* [[GEP]])
+; CHECK-NEXT:    call void @zot(ptr [[ALLOCA1]])
 ; CHECK-NEXT:    ret void
 ;
 bb:
   %alloca = alloca [30 x %struct.widget], i32 0, align 16
-  %gep = getelementptr inbounds [30 x %struct.widget], [30 x %struct.widget]* %alloca, i64 0, i64 0
-  call void @zot(%struct.widget* %gep)
+  call void @zot(ptr %alloca)
   ret void
 }
 
-declare hidden void @zot(%struct.widget*)
+declare hidden void @zot(ptr)

diff --git a/llvm/test/Transforms/InstCombine/alloca.ll b/llvm/test/Transforms/InstCombine/alloca.ll
index a61e5be12c88..24129b0a1986 100644
--- a/llvm/test/Transforms/InstCombine/alloca.ll
+++ b/llvm/test/Transforms/InstCombine/alloca.ll
@@ -11,38 +11,35 @@ declare void @use(...)
 define void @test() {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:    [[X:%.*]] = alloca [0 x i32], align 4
-; CHECK-NEXT:    call void (...) @use([0 x i32]* nonnull [[X]])
-; CHECK-NEXT:    [[Y1_SUB:%.*]] = getelementptr inbounds [0 x i32], [0 x i32]* [[X]], i64 0, i64 0
-; CHECK-NEXT:    call void (...) @use(i32* nonnull [[Y1_SUB]])
-; CHECK-NEXT:    call void (...) @use([0 x i32]* nonnull [[X]])
-; CHECK-NEXT:    call void (...) @use([0 x i32]* nonnull [[X]])
+; CHECK-NEXT:    call void (...) @use(ptr nonnull [[X]])
+; CHECK-NEXT:    call void (...) @use(ptr nonnull [[X]])
+; CHECK-NEXT:    call void (...) @use(ptr nonnull [[X]])
+; CHECK-NEXT:    call void (...) @use(ptr nonnull [[X]])
 ; CHECK-NEXT:    ret void
 ;
 ; P32-LABEL: @test(
 ; P32-NEXT:    [[X:%.*]] = alloca [0 x i32], align 4
-; P32-NEXT:    call void (...) @use([0 x i32]* nonnull [[X]])
-; P32-NEXT:    [[Y1_SUB:%.*]] = getelementptr inbounds [0 x i32], [0 x i32]* [[X]], i32 0, i32 0
-; P32-NEXT:    call void (...) @use(i32* nonnull [[Y1_SUB]])
-; P32-NEXT:    call void (...) @use([0 x i32]* nonnull [[X]])
-; P32-NEXT:    call void (...) @use([0 x i32]* nonnull [[X]])
+; P32-NEXT:    call void (...) @use(ptr nonnull [[X]])
+; P32-NEXT:    call void (...) @use(ptr nonnull [[X]])
+; P32-NEXT:    call void (...) @use(ptr nonnull [[X]])
+; P32-NEXT:    call void (...) @use(ptr nonnull [[X]])
 ; P32-NEXT:    ret void
 ;
 ; NODL-LABEL: @test(
 ; NODL-NEXT:    [[X:%.*]] = alloca [0 x i32], align 8
-; NODL-NEXT:    call void (...) @use([0 x i32]* nonnull [[X]])
-; NODL-NEXT:    [[Y1_SUB:%.*]] = getelementptr inbounds [0 x i32], [0 x i32]* [[X]], i64 0, i64 0
-; NODL-NEXT:    call void (...) @use(i32* nonnull [[Y1_SUB]])
-; NODL-NEXT:    call void (...) @use([0 x i32]* nonnull [[X]])
-; NODL-NEXT:    call void (...) @use([0 x i32]* nonnull [[X]])
+; NODL-NEXT:    call void (...) @use(ptr nonnull [[X]])
+; NODL-NEXT:    call void (...) @use(ptr nonnull [[X]])
+; NODL-NEXT:    call void (...) @use(ptr nonnull [[X]])
+; NODL-NEXT:    call void (...) @use(ptr nonnull [[X]])
 ; NODL-NEXT:    ret void
 ;
-  %X = alloca [0 x i32]           ; <[0 x i32]*> [#uses=1]
-  call void (...) @use( [0 x i32]* %X )
-  %Y = alloca i32, i32 0          ; <i32*> [#uses=1]
-  call void (...) @use( i32* %Y )
-  %Z = alloca {  }                ; <{  }*> [#uses=1]
-  call void (...) @use( {  }* %Z )
-  %size = load i32, i32* @int
+  %X = alloca [0 x i32]           ; <ptr> [#uses=1]
+  call void (...) @use( ptr %X )
+  %Y = alloca i32, i32 0          ; <ptr> [#uses=1]
+  call void (...) @use( ptr %Y )
+  %Z = alloca {  }                ; <ptr> [#uses=1]
+  call void (...) @use( ptr %Z )
+  %size = load i32, ptr @int
   %A = alloca {{}}, i32 %size
   call void (...) @use( {{}}* %A )
   ret void
@@ -53,8 +50,8 @@ define void @test2() {
 ; ALL-LABEL: @test2(
 ; ALL-NEXT:    ret void
 ;
-  %A = alloca i32         ; <i32*> [#uses=1]
-  store i32 123, i32* %A
+  %A = alloca i32         ; <ptr> [#uses=1]
+  store i32 123, ptr %A
   ret void
 }
 
@@ -63,29 +60,29 @@ define void @test3() {
 ; ALL-LABEL: @test3(
 ; ALL-NEXT:    ret void
 ;
-  %A = alloca { i32 }             ; <{ i32 }*> [#uses=1]
-  %B = getelementptr { i32 }, { i32 }* %A, i32 0, i32 0            ; <i32*> [#uses=1]
-  store i32 123, i32* %B
+  %A = alloca { i32 }             ; <ptr> [#uses=1]
+  %B = getelementptr { i32 }, ptr %A, i32 0, i32 0            ; <ptr> [#uses=1]
+  store i32 123, ptr %B
   ret void
 }
 
-define i32* @test4(i32 %n) {
+define ptr @test4(i32 %n) {
 ; CHECK-LABEL: @test4(
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[N:%.*]] to i64
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, i64 [[TMP1]], align 4
-; CHECK-NEXT:    ret i32* [[A]]
+; CHECK-NEXT:    ret ptr [[A]]
 ;
 ; P32-LABEL: @test4(
 ; P32-NEXT:    [[A:%.*]] = alloca i32, i32 [[N:%.*]], align 4
-; P32-NEXT:    ret i32* [[A]]
+; P32-NEXT:    ret ptr [[A]]
 ;
 ; NODL-LABEL: @test4(
 ; NODL-NEXT:    [[TMP1:%.*]] = zext i32 [[N:%.*]] to i64
 ; NODL-NEXT:    [[A:%.*]] = alloca i32, i64 [[TMP1]], align 4
-; NODL-NEXT:    ret i32* [[A]]
+; NODL-NEXT:    ret ptr [[A]]
 ;
   %A = alloca i32, i32 %n
-  ret i32* %A
+  ret ptr %A
 }
 
 ; Allocas which are only used by GEPs, bitcasts, addrspacecasts, and stores
@@ -98,25 +95,20 @@ define void @test5() {
 
 entry:
   %a = alloca { i32 }
-  %b = alloca i32*
+  %b = alloca ptr
   %c = alloca i32
-  %a.1 = getelementptr { i32 }, { i32 }* %a, i32 0, i32 0
-  store i32 123, i32* %a.1
-  store i32* %a.1, i32** %b
-  %b.1 = bitcast i32** %b to i32*
-  store i32 123, i32* %b.1
-  %a.2 = getelementptr { i32 }, { i32 }* %a, i32 0, i32 0
-  store atomic i32 2, i32* %a.2 unordered, align 4
-  %a.3 = getelementptr { i32 }, { i32 }* %a, i32 0, i32 0
-  store atomic i32 3, i32* %a.3 release, align 4
-  %a.4 = getelementptr { i32 }, { i32 }* %a, i32 0, i32 0
-  store atomic i32 4, i32* %a.4 seq_cst, align 4
-  %c.1 = addrspacecast i32* %c to i32 addrspace(1)*
-  store i32 123, i32 addrspace(1)* %c.1
+  store i32 123, ptr %a
+  store ptr %a, ptr %b
+  store i32 123, ptr %b
+  store atomic i32 2, ptr %a unordered, align 4
+  store atomic i32 3, ptr %a release, align 4
+  store atomic i32 4, ptr %a seq_cst, align 4
+  %c.1 = addrspacecast ptr %c to ptr addrspace(1)
+  store i32 123, ptr addrspace(1) %c.1
   ret void
 }
 
-declare void @f(i32* %p)
+declare void @f(ptr %p)
 
 ; Check that we don't delete allocas in some erroneous cases.
 define void @test6() {
@@ -124,42 +116,38 @@ define void @test6() {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[A:%.*]] = alloca { i32 }, align 4
 ; CHECK-NEXT:    [[B:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[A_1:%.*]] = getelementptr inbounds { i32 }, { i32 }* [[A]], i64 0, i32 0
-; CHECK-NEXT:    store volatile i32 123, i32* [[A_1]], align 4
-; CHECK-NEXT:    tail call void @f(i32* nonnull [[B]])
+; CHECK-NEXT:    store volatile i32 123, ptr [[A]], align 4
+; CHECK-NEXT:    tail call void @f(ptr nonnull [[B]])
 ; CHECK-NEXT:    ret void
 ;
 ; P32-LABEL: @test6(
 ; P32-NEXT:  entry:
 ; P32-NEXT:    [[A:%.*]] = alloca { i32 }, align 4
 ; P32-NEXT:    [[B:%.*]] = alloca i32, align 4
-; P32-NEXT:    [[A_1:%.*]] = getelementptr inbounds { i32 }, { i32 }* [[A]], i32 0, i32 0
-; P32-NEXT:    store volatile i32 123, i32* [[A_1]], align 4
-; P32-NEXT:    tail call void @f(i32* nonnull [[B]])
+; P32-NEXT:    store volatile i32 123, ptr [[A]], align 4
+; P32-NEXT:    tail call void @f(ptr nonnull [[B]])
 ; P32-NEXT:    ret void
 ;
 ; NODL-LABEL: @test6(
 ; NODL-NEXT:  entry:
 ; NODL-NEXT:    [[A:%.*]] = alloca { i32 }, align 8
 ; NODL-NEXT:    [[B:%.*]] = alloca i32, align 4
-; NODL-NEXT:    [[A_1:%.*]] = getelementptr inbounds { i32 }, { i32 }* [[A]], i64 0, i32 0
-; NODL-NEXT:    store volatile i32 123, i32* [[A_1]], align 8
-; NODL-NEXT:    tail call void @f(i32* nonnull [[B]])
+; NODL-NEXT:    store volatile i32 123, ptr [[A]], align 8
+; NODL-NEXT:    tail call void @f(ptr nonnull [[B]])
 ; NODL-NEXT:    ret void
 ;
 
 entry:
   %a = alloca { i32 }
   %b = alloca i32
-  %a.1 = getelementptr { i32 }, { i32 }* %a, i32 0, i32 0
-  store volatile i32 123, i32* %a.1
-  tail call void @f(i32* %b)
+  store volatile i32 123, ptr %a
+  tail call void @f(ptr %b)
   ret void
 }
 
 ; PR14371
 %opaque_type = type opaque
-%real_type = type { { i32, i32* } }
+%real_type = type { { i32, ptr } }
 
 @opaque_global = external constant %opaque_type, align 4
 
@@ -170,67 +158,48 @@ define void @test7() {
 ;
 entry:
   %0 = alloca %real_type, align 4
-  %1 = bitcast %real_type* %0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* bitcast (%opaque_type* @opaque_global to i8*), i32 8, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr %0, ptr @opaque_global, i32 8, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
 
 
 ; Check that the GEP indices use the pointer size, or 64 if unknown
 define void @test8() {
-; CHECK-LABEL: @test8(
-; CHECK-NEXT:    [[X1:%.*]] = alloca [100 x i32], align 4
-; CHECK-NEXT:    [[X1_SUB:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[X1]], i64 0, i64 0
-; CHECK-NEXT:    call void (...) @use(i32* nonnull [[X1_SUB]])
-; CHECK-NEXT:    ret void
-;
-; P32-LABEL: @test8(
-; P32-NEXT:    [[X1:%.*]] = alloca [100 x i32], align 4
-; P32-NEXT:    [[X1_SUB:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[X1]], i32 0, i32 0
-; P32-NEXT:    call void (...) @use(i32* nonnull [[X1_SUB]])
-; P32-NEXT:    ret void
-;
-; NODL-LABEL: @test8(
-; NODL-NEXT:    [[X1:%.*]] = alloca [100 x i32], align 4
-; NODL-NEXT:    [[X1_SUB:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[X1]], i64 0, i64 0
-; NODL-NEXT:    call void (...) @use(i32* nonnull [[X1_SUB]])
-; NODL-NEXT:    ret void
+; ALL-LABEL: @test8(
+; ALL-NEXT:    [[X1:%.*]] = alloca [100 x i32], align 4
+; ALL-NEXT:    call void (...) @use(ptr nonnull [[X1]])
+; ALL-NEXT:    ret void
 ;
 
 
   %x = alloca i32, i32 100
-  call void (...) @use(i32* %x)
+  call void (...) @use(ptr %x)
   ret void
 }
 
 ; PR19569
 %struct_type = type { i32, i32 }
-declare void @test9_aux(<{ %struct_type }>* inalloca(<{ %struct_type }>))
-declare i8* @llvm.stacksave()
-declare void @llvm.stackrestore(i8*)
+declare void @test9_aux(ptr inalloca(<{ %struct_type }>))
+declare ptr @llvm.stacksave()
+declare void @llvm.stackrestore(ptr)
 
-define void @test9(%struct_type* %a) {
+define void @test9(ptr %a) {
 ; ALL-LABEL: @test9(
 ; ALL-NEXT:  entry:
-; ALL-NEXT:    [[ARGMEM:%.*]] = alloca inalloca i64, align 8
-; ALL-NEXT:    [[TMPCAST:%.*]] = bitcast i64* [[ARGMEM]] to <{ [[STRUCT_TYPE:%.*]] }>*
-; ALL-NEXT:    [[TMP0:%.*]] = bitcast %struct_type* [[A:%.*]] to i64*
-; ALL-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 4
-; ALL-NEXT:    store i64 [[TMP1]], i64* [[ARGMEM]], align 8
-; ALL-NEXT:    call void @test9_aux(<{ [[STRUCT_TYPE]] }>* nonnull inalloca(<{ [[STRUCT_TYPE]] }>) [[TMPCAST]])
+; ALL-NEXT:    [[ARGMEM:%.*]] = alloca inalloca <{ [[STRUCT_TYPE:%.*]] }>, align 8
+; ALL-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A:%.*]], align 4
+; ALL-NEXT:    store i64 [[TMP0]], ptr [[ARGMEM]], align 8
+; ALL-NEXT:    call void @test9_aux(ptr nonnull inalloca(<{ [[STRUCT_TYPE]] }>) [[ARGMEM]])
 ; ALL-NEXT:    ret void
 ;
 entry:
-  %inalloca.save = call i8* @llvm.stacksave()
+  %inalloca.save = call ptr @llvm.stacksave()
   %argmem = alloca inalloca <{ %struct_type }>
-  %0 = getelementptr inbounds <{ %struct_type }>, <{ %struct_type }>* %argmem, i32 0, i32 0
-  %1 = bitcast %struct_type* %0 to i8*
-  %2 = bitcast %struct_type* %a to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %2, i32 8, i1 false)
-  call void @test9_aux(<{ %struct_type }>* inalloca(<{ %struct_type }>) %argmem)
-  call void @llvm.stackrestore(i8* %inalloca.save)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %argmem, ptr align 4 %a, i32 8, i1 false)
+  call void @test9_aux(ptr inalloca(<{ %struct_type }>) %argmem)
+  call void @llvm.stackrestore(ptr %inalloca.save)
   ret void
 }
 
@@ -240,14 +209,14 @@ define void @test10() {
 ; ALL-NEXT:    [[V32:%.*]] = alloca i1, align 8
 ; ALL-NEXT:    [[V64:%.*]] = alloca i1, align 8
 ; ALL-NEXT:    [[V33:%.*]] = alloca i1, align 8
-; ALL-NEXT:    call void (...) @use(i1* nonnull [[V32]], i1* nonnull [[V64]], i1* nonnull [[V33]])
+; ALL-NEXT:    call void (...) @use(ptr nonnull [[V32]], ptr nonnull [[V64]], ptr nonnull [[V33]])
 ; ALL-NEXT:    ret void
 ;
 entry:
   %v32 = alloca i1, align 8
   %v64 = alloca i1, i64 1, align 8
   %v33 = alloca i1, i33 1, align 8
-  call void (...) @use(i1* %v32, i1* %v64, i1* %v33)
+  call void (...) @use(ptr %v32, ptr %v64, ptr %v33)
   ret void
 }
 
@@ -255,11 +224,11 @@ define void @test11() {
 ; ALL-LABEL: @test11(
 ; ALL-NEXT:  entry:
 ; ALL-NEXT:    [[Y:%.*]] = alloca i32, align 4
-; ALL-NEXT:    call void (...) @use(i32* nonnull @int) [ "blah"(i32* [[Y]]) ]
+; ALL-NEXT:    call void (...) @use(ptr nonnull @int) [ "blah"(ptr [[Y]]) ]
 ; ALL-NEXT:    ret void
 ;
 entry:
   %y = alloca i32
-  call void (...) @use(i32* nonnull @int) [ "blah"(i32* %y) ]
+  call void (...) @use(ptr nonnull @int) [ "blah"(ptr %y) ]
   ret void
 }

diff  --git a/llvm/test/Transforms/InstCombine/byval.ll b/llvm/test/Transforms/InstCombine/byval.ll
index 45750869524b..e5ee9a458535 100644
--- a/llvm/test/Transforms/InstCombine/byval.ll
+++ b/llvm/test/Transforms/InstCombine/byval.ll
@@ -1,40 +1,36 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -passes=instcombine %s | FileCheck %s
 
-declare void @add_byval_callee(double*)
+declare void @add_byval_callee(ptr)
 
-declare void @add_byval_callee_2(double* byval(double))
+declare void @add_byval_callee_2(ptr byval(double))
 
-define void @add_byval(i64* %in) {
+define void @add_byval(ptr %in) {
 ; CHECK-LABEL: @add_byval(
-; CHECK-NEXT:    call void bitcast (void (double*)* @add_byval_callee to void (i64*)*)(i64* byval(i64) [[IN:%.*]])
+; CHECK-NEXT:    call void @add_byval_callee(ptr byval(i64) [[IN:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  %tmp = bitcast void (double*)* @add_byval_callee to void (i64*)*
-  call void %tmp(i64* byval(i64) %in)
+  call void @add_byval_callee(ptr byval(i64) %in)
   ret void
 }
 
-define void @add_byval_2(i64* %in) {
+define void @add_byval_2(ptr %in) {
 ; CHECK-LABEL: @add_byval_2(
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64* [[IN:%.*]] to double*
-; CHECK-NEXT:    call void @add_byval_callee_2(double* byval(double) [[TMP1]])
+; CHECK-NEXT:    call void @add_byval_callee_2(ptr byval(i64) [[IN:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  %tmp = bitcast void (double*)* @add_byval_callee_2 to void (i64*)*
-  call void %tmp(i64* byval(i64) %in)
+  call void @add_byval_callee_2(ptr byval(i64) %in)
   ret void
 }
 
 %t2 = type { i8 }
 
-define void @vararg_byval(i8* %p) {
+define void @vararg_byval(ptr %p) {
 ; CHECK-LABEL: @vararg_byval(
-; CHECK-NEXT:    call void (i8, ...) @vararg_callee(i8 undef, i8* byval(i8) [[P:%.*]])
+; CHECK-NEXT:    call void (i8, ...) @vararg_callee(i8 undef, ptr byval([[T2:%.*]]) [[P:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  %tmp = bitcast i8* %p to %t2*
-  call void (i8, ...) @vararg_callee(i8 undef, %t2* byval(%t2) %tmp)
+  call void (i8, ...) @vararg_callee(i8 undef, ptr byval(%t2) %p)
   ret void
 }
 

diff  --git a/llvm/test/Transforms/InstCombine/dbg-simplify-alloca-size.ll b/llvm/test/Transforms/InstCombine/dbg-simplify-alloca-size.ll
index acbed65dcafc..028b19fadf19 100644
--- a/llvm/test/Transforms/InstCombine/dbg-simplify-alloca-size.ll
+++ b/llvm/test/Transforms/InstCombine/dbg-simplify-alloca-size.ll
@@ -1,22 +1,21 @@
 ; RUN: opt -S --passes=instcombine %s | FileCheck %s
 
 ; https://github.com/llvm/llvm-project/issues/56807
-declare void @foo(i8* %pixels)
+declare void @foo(ptr %pixels)
 
 declare void @llvm.dbg.declare(metadata, metadata, metadata)
 
 ; CHECK-LABEL: @toplevel(
 ; CHECK:  entry:
 ; CHECK-NEXT:    %pixels1 = alloca [3 x i8], align 1
-; CHECK-NEXT:    call void @llvm.dbg.declare(metadata [3 x i8]* %pixels1, metadata ![[MD:[0-9]+]], metadata !DIExpression()), !dbg ![[DBG:[0-9]+]]
-; CHECK-NEXT:    %pixels1.sub = getelementptr inbounds [3 x i8], [3 x i8]* %pixels1, i64 0, i64 0
-; CHECK-NEXT:    call void @foo(i8* nonnull %pixels1.sub)
+; CHECK-NEXT:    call void @llvm.dbg.declare(metadata ptr %pixels1, metadata ![[MD:[0-9]+]], metadata !DIExpression()), !dbg ![[DBG:[0-9]+]]
+; CHECK-NEXT:    call void @foo(ptr nonnull %pixels1)
 ; CHECK-NEXT:    ret void
 define dso_local void @toplevel() {
 entry:
   %pixels = alloca i8, i32 3
-  call void @llvm.dbg.declare(metadata i8* %pixels, metadata !11, metadata !DIExpression()), !dbg !12
-  call void @foo(i8* %pixels)
+  call void @llvm.dbg.declare(metadata ptr %pixels, metadata !11, metadata !DIExpression()), !dbg !12
+  call void @foo(ptr %pixels)
   ret void
 }
 

diff  --git a/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll b/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
index aa7f1f3093e7..f4be83ace0c0 100644
--- a/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
+++ b/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
@@ -4,39 +4,37 @@
 ; float result
 
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-%struct.NSObject = type { %struct.objc_class* }
+%struct.NSObject = type { ptr }
 %struct.NSArray = type { %struct.NSObject }
 %struct.objc_class = type opaque
 %struct.objc_selector = type opaque
 
 @"\01L_OBJC_METH_VAR_NAME_112" = internal global [15 x i8] c"whiteComponent\00", section "__TEXT,__cstring,cstring_literals"
-@"\01L_OBJC_SELECTOR_REFERENCES_81" = internal global %struct.objc_selector* bitcast ([15 x i8]* @"\01L_OBJC_METH_VAR_NAME_112" to %struct.objc_selector*), section "__OBJC,__message_refs,literal_pointers,no_dead_strip"
+@"\01L_OBJC_SELECTOR_REFERENCES_81" = internal global ptr @"\01L_OBJC_METH_VAR_NAME_112", section "__OBJC,__message_refs,literal_pointers,no_dead_strip"
 
 define void @bork() nounwind  {
 ; CHECK-LABEL: @bork(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[COLOR:%.*]] = alloca %struct.NSArray*, align 8
-; CHECK-NEXT:    [[TMP103:%.*]] = load %struct.NSArray*, %struct.NSArray** [[COLOR]], align 8
-; CHECK-NEXT:    [[TMP103104:%.*]] = getelementptr [[STRUCT_NSARRAY:%.*]], %struct.NSArray* [[TMP103]], i64 0, i32 0
-; CHECK-NEXT:    [[TMP105:%.*]] = load %struct.objc_selector*, %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_81", align 8
-; CHECK-NEXT:    [[TMP107:%.*]] = call float bitcast (void (%struct.NSObject*, ...)* @objc_msgSend_fpret to float (%struct.NSObject*, %struct.objc_selector*)*)(%struct.NSObject* [[TMP103104]], %struct.objc_selector* [[TMP105]]) #[[ATTR0:[0-9]+]]
+; CHECK-NEXT:    [[COLOR:%.*]] = alloca ptr, align 8
+; CHECK-NEXT:    [[TMP103:%.*]] = load ptr, ptr [[COLOR]], align 8
+; CHECK-NEXT:    [[TMP105:%.*]] = load ptr, ptr @"\01L_OBJC_SELECTOR_REFERENCES_81", align 8
+; CHECK-NEXT:    [[TMP107:%.*]] = call float @objc_msgSend_fpret(ptr [[TMP103]], ptr [[TMP105]]) #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %color = alloca %struct.NSArray*
-  %color.466 = alloca %struct.NSObject*
-  %tmp103 = load %struct.NSArray*, %struct.NSArray** %color, align 4
-  %tmp103104 = getelementptr %struct.NSArray, %struct.NSArray* %tmp103, i32 0, i32 0
-  store %struct.NSObject* %tmp103104, %struct.NSObject** %color.466, align 4
-  %tmp105 = load %struct.objc_selector*, %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_81", align 4
-  %tmp106 = load %struct.NSObject*, %struct.NSObject** %color.466, align 4
-  %tmp107 = call float bitcast (void (%struct.NSObject*, ...)* @objc_msgSend_fpret to float (%struct.NSObject*, %struct.objc_selector*)*)( %struct.NSObject* %tmp106, %struct.objc_selector* %tmp105 ) nounwind
+  %color = alloca ptr
+  %color.466 = alloca ptr
+  %tmp103 = load ptr, ptr %color, align 4
+  store ptr %tmp103, ptr %color.466, align 4
+  %tmp105 = load ptr, ptr @"\01L_OBJC_SELECTOR_REFERENCES_81", align 4
+  %tmp106 = load ptr, ptr %color.466, align 4
+  %tmp107 = call float @objc_msgSend_fpret( ptr %tmp106, ptr %tmp105 ) nounwind
   br label %exit
 
 exit:
   ret void
 }
 
-declare void @objc_msgSend_fpret(%struct.NSObject*, ...)
+declare void @objc_msgSend_fpret(ptr, ...)

diff  --git a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
index 834f2ee37180..e83d5055c825 100644
--- a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
@@ -5,77 +5,69 @@ target datalayout = "e-p:40:64:64:32-p1:16:16:16-p2:32:32:32-p3:64:64:64-i1:8:8-
 
 declare i32 @test58_d(i64 )
 
-define i1 @test59(i8* %foo) {
+define i1 @test59(ptr %foo) {
 ; CHECK-LABEL: @test59(
-; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i8, i8* [[FOO:%.*]], i32 8
-; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i8* [[GEP1]] to i40
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[FOO:%.*]], i32 2
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[GEP1]] to i40
 ; CHECK-NEXT:    [[USE:%.*]] = zext i40 [[TMP1]] to i64
 ; CHECK-NEXT:    [[CALL:%.*]] = call i32 @test58_d(i64 [[USE]])
 ; CHECK-NEXT:    ret i1 true
 ;
-  %bit = bitcast i8* %foo to i32*
-  %gep1 = getelementptr inbounds i32, i32* %bit, i64 2
-  %gep2 = getelementptr inbounds i8, i8* %foo, i64 10
-  %cast1 = bitcast i32* %gep1 to i8*
-  %cmp = icmp ult i8* %cast1, %gep2
-  %use = ptrtoint i8* %cast1 to i64
+  %gep1 = getelementptr inbounds i32, ptr %foo, i64 2
+  %gep2 = getelementptr inbounds i8, ptr %foo, i64 10
+  %cmp = icmp ult ptr %gep1, %gep2
+  %use = ptrtoint ptr %gep1 to i64
   %call = call i32 @test58_d(i64 %use)
   ret i1 %cmp
 }
 
-define i1 @test59_as1(i8 addrspace(1)* %foo) {
+define i1 @test59_as1(ptr addrspace(1) %foo) {
 ; CHECK-LABEL: @test59_as1(
-; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[FOO:%.*]], i16 8
-; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i8 addrspace(1)* [[GEP1]] to i16
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[FOO:%.*]], i16 2
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[GEP1]] to i16
 ; CHECK-NEXT:    [[USE:%.*]] = zext i16 [[TMP1]] to i64
 ; CHECK-NEXT:    [[CALL:%.*]] = call i32 @test58_d(i64 [[USE]])
 ; CHECK-NEXT:    ret i1 true
 ;
-  %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
-  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i64 2
-  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i64 10
-  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
-  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
-  %use = ptrtoint i8 addrspace(1)* %cast1 to i64
+  %gep1 = getelementptr inbounds i32, ptr addrspace(1) %foo, i64 2
+  %gep2 = getelementptr inbounds i8, ptr addrspace(1) %foo, i64 10
+  %cmp = icmp ult ptr addrspace(1) %gep1, %gep2
+  %use = ptrtoint ptr addrspace(1) %gep1 to i64
   %call = call i32 @test58_d(i64 %use)
   ret i1 %cmp
 }
 
-define i1 @test60(i8* %foo, i64 %i, i64 %j) {
+define i1 @test60(ptr %foo, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test60(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i32
 ; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i32
 ; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i32 [[TMP1]], 2
-; CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i32 [[GEP1_IDX]], [[TMP2]]
-; CHECK-NEXT:    ret i1 [[TMP3]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[GEP1_IDX]], [[TMP2]]
+; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %bit = bitcast i8* %foo to i32*
-  %gep1 = getelementptr inbounds i32, i32* %bit, i64 %i
-  %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
-  %cast1 = bitcast i32* %gep1 to i8*
-  %cmp = icmp ult i8* %cast1, %gep2
+  %gep1 = getelementptr inbounds i32, ptr %foo, i64 %i
+  %gep2 = getelementptr inbounds i8, ptr %foo, i64 %j
+  %cmp = icmp ult ptr %gep1, %gep2
   ret i1 %cmp
 }
 
-define i1 @test60_as1(i8 addrspace(1)* %foo, i64 %i, i64 %j) {
+define i1 @test60_as1(ptr addrspace(1) %foo, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test60_as1(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i16
 ; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i16
 ; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i16 [[TMP1]], 2
-; CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
-; CHECK-NEXT:    ret i1 [[TMP3]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
+; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
-  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i64 %i
-  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i64 %j
-  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
-  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
+  %gep1 = getelementptr inbounds i32, ptr addrspace(1) %foo, i64 %i
+  %gep2 = getelementptr inbounds i8, ptr addrspace(1) %foo, i64 %j
+  %cmp = icmp ult ptr addrspace(1) %gep1, %gep2
   ret i1 %cmp
 }
 
 ; Same as test60, but look through an addrspacecast instead of a
 ; bitcast. This uses the same sized addrspace.
-define i1 @test60_addrspacecast(i8* %foo, i64 %i, i64 %j) {
+define i1 @test60_addrspacecast(ptr %foo, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test60_addrspacecast(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[J:%.*]] to i32
 ; CHECK-NEXT:    [[I_TR:%.*]] = trunc i64 [[I:%.*]] to i32
@@ -83,106 +75,98 @@ define i1 @test60_addrspacecast(i8* %foo, i64 %i, i64 %j) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i32 [[TMP2]], [[TMP1]]
 ; CHECK-NEXT:    ret i1 [[TMP3]]
 ;
-  %bit = addrspacecast i8* %foo to i32 addrspace(3)*
-  %gep1 = getelementptr inbounds i32, i32 addrspace(3)* %bit, i64 %i
-  %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
-  %cast1 = addrspacecast i32 addrspace(3)* %gep1 to i8*
-  %cmp = icmp ult i8* %cast1, %gep2
+  %bit = addrspacecast ptr %foo to ptr addrspace(3)
+  %gep1 = getelementptr inbounds i32, ptr addrspace(3) %bit, i64 %i
+  %gep2 = getelementptr inbounds i8, ptr %foo, i64 %j
+  %cast1 = addrspacecast ptr addrspace(3) %gep1 to ptr
+  %cmp = icmp ult ptr %cast1, %gep2
   ret i1 %cmp
 }
 
-define i1 @test60_addrspacecast_smaller(i8* %foo, i16 %i, i64 %j) {
+define i1 @test60_addrspacecast_smaller(ptr %foo, i16 %i, i64 %j) {
 ; CHECK-LABEL: @test60_addrspacecast_smaller(
 ; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i16 [[I:%.*]], 2
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[J:%.*]] to i16
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP1]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %bit = addrspacecast i8* %foo to i32 addrspace(1)*
-  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i16 %i
-  %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
-  %cast1 = addrspacecast i32 addrspace(1)* %gep1 to i8*
-  %cmp = icmp ult i8* %cast1, %gep2
+  %bit = addrspacecast ptr %foo to ptr addrspace(1)
+  %gep1 = getelementptr inbounds i32, ptr addrspace(1) %bit, i16 %i
+  %gep2 = getelementptr inbounds i8, ptr %foo, i64 %j
+  %cast1 = addrspacecast ptr addrspace(1) %gep1 to ptr
+  %cmp = icmp ult ptr %cast1, %gep2
   ret i1 %cmp
 }
 
-define i1 @test60_addrspacecast_larger(i8 addrspace(1)* %foo, i32 %i, i16 %j) {
+define i1 @test60_addrspacecast_larger(ptr addrspace(1) %foo, i32 %i, i16 %j) {
 ; CHECK-LABEL: @test60_addrspacecast_larger(
 ; CHECK-NEXT:    [[I_TR:%.*]] = trunc i32 [[I:%.*]] to i16
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i16 [[I_TR]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i16 [[TMP1]], [[J:%.*]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %bit = addrspacecast i8 addrspace(1)* %foo to i32 addrspace(2)*
-  %gep1 = getelementptr inbounds i32, i32 addrspace(2)* %bit, i32 %i
-  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i16 %j
-  %cast1 = addrspacecast i32 addrspace(2)* %gep1 to i8 addrspace(1)*
-  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
+  %bit = addrspacecast ptr addrspace(1) %foo to ptr addrspace(2)
+  %gep1 = getelementptr inbounds i32, ptr addrspace(2) %bit, i32 %i
+  %gep2 = getelementptr inbounds i8, ptr addrspace(1) %foo, i16 %j
+  %cast1 = addrspacecast ptr addrspace(2) %gep1 to ptr addrspace(1)
+  %cmp = icmp ult ptr addrspace(1) %cast1, %gep2
   ret i1 %cmp
 }
 
-define i1 @test61(i8* %foo, i64 %i, i64 %j) {
+define i1 @test61(ptr %foo, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test61(
-; CHECK-NEXT:    [[BIT:%.*]] = bitcast i8* [[FOO:%.*]] to i32*
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i32
-; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i32, i32* [[BIT]], i32 [[TMP1]]
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i32, ptr [[FOO:%.*]], i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i32
-; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, i8* [[FOO]], i32 [[TMP2]]
-; CHECK-NEXT:    [[CAST1:%.*]] = bitcast i32* [[GEP1]] to i8*
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8* [[GEP2]], [[CAST1]]
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, ptr [[FOO]], i32 [[TMP2]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult ptr [[GEP1]], [[GEP2]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %bit = bitcast i8* %foo to i32*
-  %gep1 = getelementptr i32, i32* %bit, i64 %i
-  %gep2 = getelementptr  i8,  i8* %foo, i64 %j
-  %cast1 = bitcast i32* %gep1 to i8*
-  %cmp = icmp ult i8* %cast1, %gep2
+  %gep1 = getelementptr i32, ptr %foo, i64 %i
+  %gep2 = getelementptr  i8,  ptr %foo, i64 %j
+  %cmp = icmp ult ptr %gep1, %gep2
   ret i1 %cmp
 ; Don't transform non-inbounds GEPs.
 }
 
-define i1 @test61_as1(i8 addrspace(1)* %foo, i16 %i, i16 %j) {
+define i1 @test61_as1(ptr addrspace(1) %foo, i16 %i, i16 %j) {
 ; CHECK-LABEL: @test61_as1(
-; CHECK-NEXT:    [[BIT:%.*]] = bitcast i8 addrspace(1)* [[FOO:%.*]] to i32 addrspace(1)*
-; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i32, i32 addrspace(1)* [[BIT]], i16 [[I:%.*]]
-; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, i8 addrspace(1)* [[FOO]], i16 [[J:%.*]]
-; CHECK-NEXT:    [[CAST1:%.*]] = bitcast i32 addrspace(1)* [[GEP1]] to i8 addrspace(1)*
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 addrspace(1)* [[GEP2]], [[CAST1]]
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i32, ptr addrspace(1) [[FOO:%.*]], i16 [[I:%.*]]
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, ptr addrspace(1) [[FOO]], i16 [[J:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult ptr addrspace(1) [[GEP1]], [[GEP2]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
-  %gep1 = getelementptr i32, i32 addrspace(1)* %bit, i16 %i
-  %gep2 = getelementptr i8, i8 addrspace(1)* %foo, i16 %j
-  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
-  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
+  %gep1 = getelementptr i32, ptr addrspace(1) %foo, i16 %i
+  %gep2 = getelementptr i8, ptr addrspace(1) %foo, i16 %j
+  %cmp = icmp ult ptr addrspace(1) %gep1, %gep2
   ret i1 %cmp
 ; Don't transform non-inbounds GEPs.
 }
 
 ; Negative test: GEP inbounds may cross sign boundary.
-define i1 @test62(i8* %a) {
+define i1 @test62(ptr %a) {
 ; CHECK-LABEL: @test62(
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i32 1
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, i8* [[A]], i32 10
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8* [[ARRAYIDX1]], [[ARRAYIDX2]]
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i32 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 10
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt ptr [[ARRAYIDX1]], [[ARRAYIDX2]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %arrayidx1 = getelementptr inbounds i8, i8* %a, i64 1
-  %arrayidx2 = getelementptr inbounds i8, i8* %a, i64 10
-  %cmp = icmp slt i8* %arrayidx1, %arrayidx2
+  %arrayidx1 = getelementptr inbounds i8, ptr %a, i64 1
+  %arrayidx2 = getelementptr inbounds i8, ptr %a, i64 10
+  %cmp = icmp slt ptr %arrayidx1, %arrayidx2
   ret i1 %cmp
 }
 
-define i1 @test62_as1(i8 addrspace(1)* %a) {
+define i1 @test62_as1(ptr addrspace(1) %a) {
 ; CHECK-LABEL: @test62_as1(
-; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[A:%.*]], i16 1
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[A]], i16 10
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 addrspace(1)* [[ARRAYIDX1]], [[ARRAYIDX2]]
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[A:%.*]], i16 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[A]], i16 10
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt ptr addrspace(1) [[ARRAYIDX1]], [[ARRAYIDX2]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %arrayidx1 = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 1
-  %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 10
-  %cmp = icmp slt i8 addrspace(1)* %arrayidx1, %arrayidx2
+  %arrayidx1 = getelementptr inbounds i8, ptr addrspace(1) %a, i64 1
+  %arrayidx2 = getelementptr inbounds i8, ptr addrspace(1) %a, i64 10
+  %cmp = icmp slt ptr addrspace(1) %arrayidx1, %arrayidx2
   ret i1 %cmp
 }
 
@@ -250,23 +234,23 @@ define i1 @icmp_ashr_and_overshift(i8 %X) {
 }
 
 ; PR16244
-define i1 @test71(i8* %x) {
+define i1 @test71(ptr %x) {
 ; CHECK-LABEL: @test71(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %a = getelementptr i8, i8* %x, i64 8
-  %b = getelementptr inbounds i8, i8* %x, i64 8
-  %c = icmp ugt i8* %a, %b
+  %a = getelementptr i8, ptr %x, i64 8
+  %b = getelementptr inbounds i8, ptr %x, i64 8
+  %c = icmp ugt ptr %a, %b
   ret i1 %c
 }
 
-define i1 @test71_as1(i8 addrspace(1)* %x) {
+define i1 @test71_as1(ptr addrspace(1) %x) {
 ; CHECK-LABEL: @test71_as1(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %a = getelementptr i8, i8 addrspace(1)* %x, i64 8
-  %b = getelementptr inbounds i8, i8 addrspace(1)* %x, i64 8
-  %c = icmp ugt i8 addrspace(1)* %a, %b
+  %a = getelementptr i8, ptr addrspace(1) %x, i64 8
+  %b = getelementptr inbounds i8, ptr addrspace(1) %x, i64 8
+  %c = icmp ugt ptr addrspace(1) %a, %b
   ret i1 %c
 }
 

diff  --git a/llvm/test/Transforms/InstCombine/icmp-gep.ll b/llvm/test/Transforms/InstCombine/icmp-gep.ll
index 62307039f6b5..bc8bc7b74d3b 100644
--- a/llvm/test/Transforms/InstCombine/icmp-gep.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-gep.ll
@@ -3,198 +3,195 @@
 
 target datalayout = "e-p:64:64:64-p1:16:16:16-p2:32:32:32-p3:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 
-declare i8* @getptr()
-declare void @use(i8*)
+declare ptr @getptr()
+declare void @use(ptr)
 
-define i1 @eq_base(i8* %x, i64 %y) {
+define i1 @eq_base(ptr %x, i64 %y) {
 ; CHECK-LABEL: @eq_base(
-; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, i8* [[X:%.*]], i64 [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i8* [[G]], [[X]]
+; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, ptr [[X:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq ptr [[G]], [[X]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %g = getelementptr i8, i8* %x, i64 %y
-  %r = icmp eq i8* %g, %x
+  %g = getelementptr i8, ptr %x, i64 %y
+  %r = icmp eq ptr %g, %x
   ret i1 %r
 }
 
 define i1 @ne_base_commute(i64 %y) {
 ; CHECK-LABEL: @ne_base_commute(
-; CHECK-NEXT:    [[X:%.*]] = call i8* @getptr()
-; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, i8* [[X]], i64 [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp ne i8* [[X]], [[G]]
+; CHECK-NEXT:    [[X:%.*]] = call ptr @getptr()
+; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, ptr [[X]], i64 [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne ptr [[X]], [[G]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %x = call i8* @getptr() ; thwart complexity-based canonicalization
-  %g = getelementptr i8, i8* %x, i64 %y
-  %r = icmp ne i8* %x, %g
+  %x = call ptr @getptr() ; thwart complexity-based canonicalization
+  %g = getelementptr i8, ptr %x, i64 %y
+  %r = icmp ne ptr %x, %g
   ret i1 %r
 }
 
-define i1 @ne_base_inbounds(i8* %x, i64 %y) {
+define i1 @ne_base_inbounds(ptr %x, i64 %y) {
 ; CHECK-LABEL: @ne_base_inbounds(
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i64 [[Y:%.*]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %g = getelementptr inbounds i8, i8* %x, i64 %y
-  %r = icmp ne i8* %g, %x
+  %g = getelementptr inbounds i8, ptr %x, i64 %y
+  %r = icmp ne ptr %g, %x
   ret i1 %r
 }
 
 define i1 @eq_base_inbounds_commute(i64 %y) {
 ; CHECK-LABEL: @eq_base_inbounds_commute(
-; CHECK-NEXT:    [[X:%.*]] = call i8* @getptr()
+; CHECK-NEXT:    [[X:%.*]] = call ptr @getptr()
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i64 [[Y:%.*]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %x = call i8* @getptr() ; thwart complexity-based canonicalization
-  %g = getelementptr inbounds i8, i8* %x, i64 %y
-  %r = icmp eq i8* %x, %g
+  %x = call ptr @getptr() ; thwart complexity-based canonicalization
+  %g = getelementptr inbounds i8, ptr %x, i64 %y
+  %r = icmp eq ptr %x, %g
   ret i1 %r
 }
 
-define i1 @slt_base(i8* %x, i64 %y) {
+define i1 @slt_base(ptr %x, i64 %y) {
 ; CHECK-LABEL: @slt_base(
-; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, i8* [[X:%.*]], i64 [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp slt i8* [[G]], [[X]]
+; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, ptr [[X:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp slt ptr [[G]], [[X]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %g = getelementptr i8, i8* %x, i64 %y
-  %r = icmp slt i8* %g, %x
+  %g = getelementptr i8, ptr %x, i64 %y
+  %r = icmp slt ptr %g, %x
   ret i1 %r
 }
 
 define i1 @sgt_base_commute(i64 %y) {
 ; CHECK-LABEL: @sgt_base_commute(
-; CHECK-NEXT:    [[X:%.*]] = call i8* @getptr()
-; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, i8* [[X]], i64 [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8* [[X]], [[G]]
+; CHECK-NEXT:    [[X:%.*]] = call ptr @getptr()
+; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, ptr [[X]], i64 [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt ptr [[X]], [[G]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %x = call i8* @getptr() ; thwart complexity-based canonicalization
-  %g = getelementptr i8, i8* %x, i64 %y
-  %r = icmp sgt i8* %x, %g
+  %x = call ptr @getptr() ; thwart complexity-based canonicalization
+  %g = getelementptr i8, ptr %x, i64 %y
+  %r = icmp sgt ptr %x, %g
   ret i1 %r
 }
 
-define i1 @slt_base_inbounds(i8* %x, i64 %y) {
+define i1 @slt_base_inbounds(ptr %x, i64 %y) {
 ; CHECK-LABEL: @slt_base_inbounds(
-; CHECK-NEXT:    [[G:%.*]] = getelementptr inbounds i8, i8* [[X:%.*]], i64 [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp slt i8* [[G]], [[X]]
+; CHECK-NEXT:    [[G:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp slt ptr [[G]], [[X]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %g = getelementptr inbounds i8, i8* %x, i64 %y
-  %r = icmp slt i8* %g, %x
+  %g = getelementptr inbounds i8, ptr %x, i64 %y
+  %r = icmp slt ptr %g, %x
   ret i1 %r
 }
 
 define i1 @sgt_base_inbounds_commute(i64 %y) {
 ; CHECK-LABEL: @sgt_base_inbounds_commute(
-; CHECK-NEXT:    [[X:%.*]] = call i8* @getptr()
-; CHECK-NEXT:    [[G:%.*]] = getelementptr inbounds i8, i8* [[X]], i64 [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8* [[X]], [[G]]
+; CHECK-NEXT:    [[X:%.*]] = call ptr @getptr()
+; CHECK-NEXT:    [[G:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt ptr [[X]], [[G]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %x = call i8* @getptr() ; thwart complexity-based canonicalization
-  %g = getelementptr inbounds i8, i8* %x, i64 %y
-  %r = icmp sgt i8* %x, %g
+  %x = call ptr @getptr() ; thwart complexity-based canonicalization
+  %g = getelementptr inbounds i8, ptr %x, i64 %y
+  %r = icmp sgt ptr %x, %g
   ret i1 %r
 }
 
-define i1 @ult_base(i8* %x, i64 %y) {
+define i1 @ult_base(ptr %x, i64 %y) {
 ; CHECK-LABEL: @ult_base(
-; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, i8* [[X:%.*]], i64 [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp ult i8* [[G]], [[X]]
+; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, ptr [[X:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ult ptr [[G]], [[X]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %g = getelementptr i8, i8* %x, i64 %y
-  %r = icmp ult i8* %g, %x
+  %g = getelementptr i8, ptr %x, i64 %y
+  %r = icmp ult ptr %g, %x
   ret i1 %r
 }
 
 define i1 @ugt_base_commute(i64 %y) {
 ; CHECK-LABEL: @ugt_base_commute(
-; CHECK-NEXT:    [[X:%.*]] = call i8* @getptr()
-; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, i8* [[X]], i64 [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8* [[X]], [[G]]
+; CHECK-NEXT:    [[X:%.*]] = call ptr @getptr()
+; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, ptr [[X]], i64 [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ugt ptr [[X]], [[G]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %x = call i8* @getptr() ; thwart complexity-based canonicalization
-  %g = getelementptr i8, i8* %x, i64 %y
-  %r = icmp ugt i8* %x, %g
+  %x = call ptr @getptr() ; thwart complexity-based canonicalization
+  %g = getelementptr i8, ptr %x, i64 %y
+  %r = icmp ugt ptr %x, %g
   ret i1 %r
 }
 
-define i1 @ult_base_inbounds(i8* %x, i64 %y) {
+define i1 @ult_base_inbounds(ptr %x, i64 %y) {
 ; CHECK-LABEL: @ult_base_inbounds(
 ; CHECK-NEXT:    [[R:%.*]] = icmp slt i64 [[Y:%.*]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %g = getelementptr inbounds i8, i8* %x, i64 %y
-  %r = icmp ult i8* %g, %x
+  %g = getelementptr inbounds i8, ptr %x, i64 %y
+  %r = icmp ult ptr %g, %x
   ret i1 %r
 }
 
 define i1 @ugt_base_inbounds_commute(i64 %y) {
 ; CHECK-LABEL: @ugt_base_inbounds_commute(
-; CHECK-NEXT:    [[X:%.*]] = call i8* @getptr()
+; CHECK-NEXT:    [[X:%.*]] = call ptr @getptr()
 ; CHECK-NEXT:    [[R:%.*]] = icmp slt i64 [[Y:%.*]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %x = call i8* @getptr() ; thwart complexity-based canonicalization
-  %g = getelementptr inbounds i8, i8* %x, i64 %y
-  %r = icmp ugt i8* %x, %g
+  %x = call ptr @getptr() ; thwart complexity-based canonicalization
+  %g = getelementptr inbounds i8, ptr %x, i64 %y
+  %r = icmp ugt ptr %x, %g
   ret i1 %r
 }
 
-define i1 @ne_base_inbounds_use(i8* %x, i64 %y) {
+define i1 @ne_base_inbounds_use(ptr %x, i64 %y) {
 ; CHECK-LABEL: @ne_base_inbounds_use(
-; CHECK-NEXT:    [[G:%.*]] = getelementptr inbounds i8, i8* [[X:%.*]], i64 [[Y:%.*]]
-; CHECK-NEXT:    call void @use(i8* [[G]])
+; CHECK-NEXT:    [[G:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i64 [[Y:%.*]]
+; CHECK-NEXT:    call void @use(ptr [[G]])
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i64 [[Y]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %g = getelementptr inbounds i8, i8* %x, i64 %y
-  call void @use(i8* %g)
-  %r = icmp ne i8* %g, %x
+  %g = getelementptr inbounds i8, ptr %x, i64 %y
+  call void @use(ptr %g)
+  %r = icmp ne ptr %g, %x
   ret i1 %r
 }
 
 define i1 @eq_base_inbounds_commute_use(i64 %y) {
 ; CHECK-LABEL: @eq_base_inbounds_commute_use(
-; CHECK-NEXT:    [[X:%.*]] = call i8* @getptr()
-; CHECK-NEXT:    [[G:%.*]] = getelementptr inbounds i8, i8* [[X]], i64 [[Y:%.*]]
-; CHECK-NEXT:    call void @use(i8* [[G]])
+; CHECK-NEXT:    [[X:%.*]] = call ptr @getptr()
+; CHECK-NEXT:    [[G:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[Y:%.*]]
+; CHECK-NEXT:    call void @use(ptr [[G]])
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i64 [[Y]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %x = call i8* @getptr() ; thwart complexity-based canonicalization
-  %g = getelementptr inbounds i8, i8* %x, i64 %y
-  call void @use(i8* %g)
-  %r = icmp eq i8* %x, %g
+  %x = call ptr @getptr() ; thwart complexity-based canonicalization
+  %g = getelementptr inbounds i8, ptr %x, i64 %y
+  call void @use(ptr %g)
+  %r = icmp eq ptr %x, %g
   ret i1 %r
 }
 
-define i1 @eq_bitcast_base([2 x i8]* %p, i64 %x) {
+define i1 @eq_bitcast_base(ptr %p, i64 %x) {
 ; CHECK-LABEL: @eq_bitcast_base(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [2 x i8], [2 x i8]* [[P:%.*]], i64 [[X:%.*]], i64 0
-; CHECK-NEXT:    [[B:%.*]] = getelementptr [2 x i8], [2 x i8]* [[P]], i64 0, i64 0
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i8* [[GEP]], [[B]]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr [2 x i8], ptr [[P:%.*]], i64 [[X:%.*]], i64 0
+; CHECK-NEXT:    [[R:%.*]] = icmp eq ptr [[GEP]], [[P]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %gep = getelementptr [2 x i8], [2 x i8]* %p, i64 %x, i64 0
-  %b = bitcast [2 x i8]* %p to i8*
-  %r = icmp eq i8* %gep, %b
+  %gep = getelementptr [2 x i8], ptr %p, i64 %x, i64 0
+  %r = icmp eq ptr %gep, %p
   ret i1 %r
 }
 
-define i1 @eq_bitcast_base_inbounds([2 x i8]* %p, i64 %x) {
+define i1 @eq_bitcast_base_inbounds(ptr %p, i64 %x) {
 ; CHECK-LABEL: @eq_bitcast_base_inbounds(
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i64 [[X:%.*]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
-  %gep = getelementptr inbounds [2 x i8], [2 x i8]* %p, i64 %x, i64 0
-  %b = bitcast [2 x i8]* %p to i8*
-  %r = icmp eq i8* %gep, %b
+  %gep = getelementptr inbounds [2 x i8], ptr %p, i64 %x, i64 0
+  %r = icmp eq ptr %gep, %p
   ret i1 %r
 }
 
@@ -205,8 +202,8 @@ define i1 @PR8882(i64 %i) {
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[I:%.*]], 1000
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %p1 = getelementptr inbounds i32, i32* getelementptr inbounds ([1000 x i32], [1000 x i32]* @X, i64 0, i64 0), i64 %i
-  %cmp = icmp eq i32* %p1, getelementptr inbounds ([1000 x i32], [1000 x i32]* @X, i64 1, i64 0)
+  %p1 = getelementptr inbounds i32, ptr @X, i64 %i
+  %cmp = icmp eq ptr %p1, getelementptr inbounds ([1000 x i32], ptr @X, i64 1, i64 0)
   ret i1 %cmp
 }
 
@@ -218,177 +215,161 @@ define i1 @test24_as1(i64 %i) {
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[TMP1]], 1000
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %p1 = getelementptr inbounds i32, i32 addrspace(1)* getelementptr inbounds ([1000 x i32], [1000 x i32] addrspace(1)* @X_as1, i64 0, i64 0), i64 %i
-  %cmp = icmp eq i32 addrspace(1)* %p1, getelementptr inbounds ([1000 x i32], [1000 x i32] addrspace(1)* @X_as1, i64 1, i64 0)
+  %p1 = getelementptr inbounds i32, ptr addrspace(1) @X_as1, i64 %i
+  %cmp = icmp eq ptr addrspace(1) %p1, getelementptr inbounds ([1000 x i32], ptr addrspace(1) @X_as1, i64 1, i64 0)
   ret i1 %cmp
 }
 
 ; PR16244
-define i1 @test71(i8* %x) {
+define i1 @test71(ptr %x) {
 ; CHECK-LABEL: @test71(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %a = getelementptr i8, i8* %x, i64 8
-  %b = getelementptr inbounds i8, i8* %x, i64 8
-  %c = icmp ugt i8* %a, %b
+  %a = getelementptr i8, ptr %x, i64 8
+  %b = getelementptr inbounds i8, ptr %x, i64 8
+  %c = icmp ugt ptr %a, %b
   ret i1 %c
 }
 
-define i1 @test71_as1(i8 addrspace(1)* %x) {
+define i1 @test71_as1(ptr addrspace(1) %x) {
 ; CHECK-LABEL: @test71_as1(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %a = getelementptr i8, i8 addrspace(1)* %x, i64 8
-  %b = getelementptr inbounds i8, i8 addrspace(1)* %x, i64 8
-  %c = icmp ugt i8 addrspace(1)* %a, %b
+  %a = getelementptr i8, ptr addrspace(1) %x, i64 8
+  %b = getelementptr inbounds i8, ptr addrspace(1) %x, i64 8
+  %c = icmp ugt ptr addrspace(1) %a, %b
   ret i1 %c
 }
 
 declare i32 @test58_d(i64)
 
-define i1 @test59(i8* %foo) {
+define i1 @test59(ptr %foo) {
 ; CHECK-LABEL: @test59(
-; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i8, i8* [[FOO:%.*]], i64 8
-; CHECK-NEXT:    [[USE:%.*]] = ptrtoint i8* [[GEP1]] to i64
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[FOO:%.*]], i64 2
+; CHECK-NEXT:    [[USE:%.*]] = ptrtoint ptr [[GEP1]] to i64
 ; CHECK-NEXT:    [[CALL:%.*]] = call i32 @test58_d(i64 [[USE]])
 ; CHECK-NEXT:    ret i1 true
 ;
-  %bit = bitcast i8* %foo to i32*
-  %gep1 = getelementptr inbounds i32, i32* %bit, i64 2
-  %gep2 = getelementptr inbounds i8, i8* %foo, i64 10
-  %cast1 = bitcast i32* %gep1 to i8*
-  %cmp = icmp ult i8* %cast1, %gep2
-  %use = ptrtoint i8* %cast1 to i64
+  %gep1 = getelementptr inbounds i32, ptr %foo, i64 2
+  %gep2 = getelementptr inbounds i8, ptr %foo, i64 10
+  %cmp = icmp ult ptr %gep1, %gep2
+  %use = ptrtoint ptr %gep1 to i64
   %call = call i32 @test58_d(i64 %use)
   ret i1 %cmp
 }
 
-define i1 @test59_as1(i8 addrspace(1)* %foo) {
+define i1 @test59_as1(ptr addrspace(1) %foo) {
 ; CHECK-LABEL: @test59_as1(
-; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[FOO:%.*]], i16 8
-; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i8 addrspace(1)* [[GEP1]] to i16
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[FOO:%.*]], i16 2
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[GEP1]] to i16
 ; CHECK-NEXT:    [[USE:%.*]] = zext i16 [[TMP1]] to i64
 ; CHECK-NEXT:    [[CALL:%.*]] = call i32 @test58_d(i64 [[USE]])
 ; CHECK-NEXT:    ret i1 true
 ;
-  %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
-  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i64 2
-  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i64 10
-  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
-  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
-  %use = ptrtoint i8 addrspace(1)* %cast1 to i64
+  %gep1 = getelementptr inbounds i32, ptr addrspace(1) %foo, i64 2
+  %gep2 = getelementptr inbounds i8, ptr addrspace(1) %foo, i64 10
+  %cmp = icmp ult ptr addrspace(1) %gep1, %gep2
+  %use = ptrtoint ptr addrspace(1) %gep1 to i64
   %call = call i32 @test58_d(i64 %use)
   ret i1 %cmp
 }
 
-define i1 @test60(i8* %foo, i64 %i, i64 %j) {
+define i1 @test60(ptr %foo, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test60(
 ; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i64 [[I:%.*]], 2
-; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i64 [[GEP1_IDX]], [[J:%.*]]
-; CHECK-NEXT:    ret i1 [[TMP1]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[GEP1_IDX]], [[J:%.*]]
+; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %bit = bitcast i8* %foo to i32*
-  %gep1 = getelementptr inbounds i32, i32* %bit, i64 %i
-  %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
-  %cast1 = bitcast i32* %gep1 to i8*
-  %cmp = icmp ult i8* %cast1, %gep2
+  %gep1 = getelementptr inbounds i32, ptr %foo, i64 %i
+  %gep2 = getelementptr inbounds i8, ptr %foo, i64 %j
+  %cmp = icmp ult ptr %gep1, %gep2
   ret i1 %cmp
 }
 
-define i1 @test60_as1(i8 addrspace(1)* %foo, i64 %i, i64 %j) {
+define i1 @test60_as1(ptr addrspace(1) %foo, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test60_as1(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i16
 ; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i16
 ; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i16 [[TMP1]], 2
-; CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
-; CHECK-NEXT:    ret i1 [[TMP3]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
+; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
-  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i64 %i
-  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i64 %j
-  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
-  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
+  %gep1 = getelementptr inbounds i32, ptr addrspace(1) %foo, i64 %i
+  %gep2 = getelementptr inbounds i8, ptr addrspace(1) %foo, i64 %j
+  %cmp = icmp ult ptr addrspace(1) %gep1, %gep2
   ret i1 %cmp
 }
 
 ; Same as test60, but look through an addrspacecast instead of a
 ; bitcast. This uses the same sized addrspace.
-define i1 @test60_addrspacecast(i8* %foo, i64 %i, i64 %j) {
+define i1 @test60_addrspacecast(ptr %foo, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test60_addrspacecast(
 ; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i64 [[I:%.*]], 2
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i64 [[GEP1_IDX]], [[J:%.*]]
 ; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
-  %bit = addrspacecast i8* %foo to i32 addrspace(3)*
-  %gep1 = getelementptr inbounds i32, i32 addrspace(3)* %bit, i64 %i
-  %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
-  %cast1 = addrspacecast i32 addrspace(3)* %gep1 to i8*
-  %cmp = icmp ult i8* %cast1, %gep2
+  %bit = addrspacecast ptr %foo to ptr addrspace(3)
+  %gep1 = getelementptr inbounds i32, ptr addrspace(3) %bit, i64 %i
+  %gep2 = getelementptr inbounds i8, ptr %foo, i64 %j
+  %cast1 = addrspacecast ptr addrspace(3) %gep1 to ptr
+  %cmp = icmp ult ptr %cast1, %gep2
   ret i1 %cmp
 }
 
-define i1 @test60_addrspacecast_smaller(i8* %foo, i16 %i, i64 %j) {
+define i1 @test60_addrspacecast_smaller(ptr %foo, i16 %i, i64 %j) {
 ; CHECK-LABEL: @test60_addrspacecast_smaller(
 ; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i16 [[I:%.*]], 2
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[J:%.*]] to i16
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP1]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %bit = addrspacecast i8* %foo to i32 addrspace(1)*
-  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i16 %i
-  %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
-  %cast1 = addrspacecast i32 addrspace(1)* %gep1 to i8*
-  %cmp = icmp ult i8* %cast1, %gep2
+  %bit = addrspacecast ptr %foo to ptr addrspace(1)
+  %gep1 = getelementptr inbounds i32, ptr addrspace(1) %bit, i16 %i
+  %gep2 = getelementptr inbounds i8, ptr %foo, i64 %j
+  %cast1 = addrspacecast ptr addrspace(1) %gep1 to ptr
+  %cmp = icmp ult ptr %cast1, %gep2
   ret i1 %cmp
 }
 
-define i1 @test60_addrspacecast_larger(i8 addrspace(1)* %foo, i32 %i, i16 %j) {
+define i1 @test60_addrspacecast_larger(ptr addrspace(1) %foo, i32 %i, i16 %j) {
 ; CHECK-LABEL: @test60_addrspacecast_larger(
 ; CHECK-NEXT:    [[I_TR:%.*]] = trunc i32 [[I:%.*]] to i16
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i16 [[I_TR]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i16 [[TMP1]], [[J:%.*]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %bit = addrspacecast i8 addrspace(1)* %foo to i32 addrspace(2)*
-  %gep1 = getelementptr inbounds i32, i32 addrspace(2)* %bit, i32 %i
-  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i16 %j
-  %cast1 = addrspacecast i32 addrspace(2)* %gep1 to i8 addrspace(1)*
-  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
+  %bit = addrspacecast ptr addrspace(1) %foo to ptr addrspace(2)
+  %gep1 = getelementptr inbounds i32, ptr addrspace(2) %bit, i32 %i
+  %gep2 = getelementptr inbounds i8, ptr addrspace(1) %foo, i16 %j
+  %cast1 = addrspacecast ptr addrspace(2) %gep1 to ptr addrspace(1)
+  %cmp = icmp ult ptr addrspace(1) %cast1, %gep2
   ret i1 %cmp
 }
 
-define i1 @test61(i8* %foo, i64 %i, i64 %j) {
+define i1 @test61(ptr %foo, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test61(
-; CHECK-NEXT:    [[BIT:%.*]] = bitcast i8* [[FOO:%.*]] to i32*
-; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i32, i32* [[BIT]], i64 [[I:%.*]]
-; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, i8* [[FOO]], i64 [[J:%.*]]
-; CHECK-NEXT:    [[CAST1:%.*]] = bitcast i32* [[GEP1]] to i8*
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8* [[GEP2]], [[CAST1]]
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i32, ptr [[FOO:%.*]], i64 [[I:%.*]]
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, ptr [[FOO]], i64 [[J:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult ptr [[GEP1]], [[GEP2]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %bit = bitcast i8* %foo to i32*
-  %gep1 = getelementptr i32, i32* %bit, i64 %i
-  %gep2 = getelementptr  i8,  i8* %foo, i64 %j
-  %cast1 = bitcast i32* %gep1 to i8*
-  %cmp = icmp ult i8* %cast1, %gep2
+  %gep1 = getelementptr i32, ptr %foo, i64 %i
+  %gep2 = getelementptr  i8,  ptr %foo, i64 %j
+  %cmp = icmp ult ptr %gep1, %gep2
   ret i1 %cmp
 ; Don't transform non-inbounds GEPs.
 }
 
-define i1 @test61_as1(i8 addrspace(1)* %foo, i16 %i, i16 %j) {
+define i1 @test61_as1(ptr addrspace(1) %foo, i16 %i, i16 %j) {
 ; CHECK-LABEL: @test61_as1(
-; CHECK-NEXT:    [[BIT:%.*]] = bitcast i8 addrspace(1)* [[FOO:%.*]] to i32 addrspace(1)*
-; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i32, i32 addrspace(1)* [[BIT]], i16 [[I:%.*]]
-; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, i8 addrspace(1)* [[FOO]], i16 [[J:%.*]]
-; CHECK-NEXT:    [[CAST1:%.*]] = bitcast i32 addrspace(1)* [[GEP1]] to i8 addrspace(1)*
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 addrspace(1)* [[GEP2]], [[CAST1]]
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i32, ptr addrspace(1) [[FOO:%.*]], i16 [[I:%.*]]
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, ptr addrspace(1) [[FOO]], i16 [[J:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult ptr addrspace(1) [[GEP1]], [[GEP2]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
-  %gep1 = getelementptr i32, i32 addrspace(1)* %bit, i16 %i
-  %gep2 = getelementptr i8, i8 addrspace(1)* %foo, i16 %j
-  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
-  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
+  %gep1 = getelementptr i32, ptr addrspace(1) %foo, i16 %i
+  %gep2 = getelementptr i8, ptr addrspace(1) %foo, i16 %j
+  %cmp = icmp ult ptr addrspace(1) %gep1, %gep2
   ret i1 %cmp
 ; Don't transform non-inbounds GEPs.
 }

diff  --git a/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll b/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll
index 46bfa8d8ed21..5e110c9d9438 100644
--- a/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll
+++ b/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll
@@ -2,9 +2,9 @@
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 
 declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
-declare void @foo(i8* nocapture, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @foo(ptr nocapture, ptr nocapture)
 
 define void @bar(i1 %flag) #0 !dbg !4 {
 ; CHECK-LABEL: @bar(
@@ -17,16 +17,14 @@ define void @bar(i1 %flag) #0 !dbg !4 {
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    call void @llvm.dbg.declare(metadata [1 x i8]* [[TEXT]], metadata [[META16:![0-9]+]], metadata !DIExpression()), !dbg [[DBG24:![0-9]+]]
+; CHECK-NEXT:    call void @llvm.dbg.declare(metadata ptr [[TEXT]], metadata [[META16:![0-9]+]], metadata !DIExpression()), !dbg [[DBG24:![0-9]+]]
 ; CHECK-NEXT:    br label [[FIN:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8], [1 x i8]* [[TEXT]], i64 0, i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [1 x i8], [1 x i8]* [[BUFF]], i64 0, i64 0
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 1, i8* [[TMP0]])
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 1, i8* [[TMP1]])
-; CHECK-NEXT:    call void @foo(i8* [[TMP1]], i8* [[TMP0]])
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 1, i8* [[TMP1]])
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 1, i8* [[TMP0]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TEXT]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[BUFF]])
+; CHECK-NEXT:    call void @foo(ptr nonnull [[BUFF]], ptr nonnull [[TEXT]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[BUFF]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TEXT]])
 ; CHECK-NEXT:    br label [[FIN]]
 ; CHECK:       fin:
 ; CHECK-NEXT:    ret void
@@ -34,36 +32,34 @@ define void @bar(i1 %flag) #0 !dbg !4 {
 entry:
   %text = alloca [1 x i8], align 1
   %buff = alloca [1 x i8], align 1
-  %0 = getelementptr inbounds [1 x i8], [1 x i8]* %text, i64 0, i64 0
-  %1 = getelementptr inbounds [1 x i8], [1 x i8]* %buff, i64 0, i64 0
   br i1 %flag, label %if, label %else
 
 if:
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %0)
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %1)
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %1)
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %0)
+  call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+  call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
+  call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
+  call void @llvm.lifetime.end.p0(i64 1, ptr %text)
   br label %bb2
 
 bb2:
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %0)
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %1)
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %0)
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %1)
+  call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+  call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
+  call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+  call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
   br label %bb3
 
 bb3:
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %0)
-  call void @llvm.dbg.declare(metadata [1 x i8]* %text, metadata !14, metadata !25), !dbg !26
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %0)
+  call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+  call void @llvm.dbg.declare(metadata ptr %text, metadata !14, metadata !25), !dbg !26
+  call void @llvm.lifetime.end.p0(i64 1, ptr %text)
   br label %fin
 
 else:
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %0)
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %1)
-  call void @foo(i8* %1, i8* %0)
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %1)
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %0)
+  call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+  call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
+  call void @foo(ptr %buff, ptr %text)
+  call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
+  call void @llvm.lifetime.end.p0(i64 1, ptr %text)
   br  label %fin
 
 fin:

diff  --git a/llvm/test/Transforms/InstCombine/non-integral-pointers.ll b/llvm/test/Transforms/InstCombine/non-integral-pointers.ll
index 0b664098410d..7c82b7ff36f5 100644
--- a/llvm/test/Transforms/InstCombine/non-integral-pointers.ll
+++ b/llvm/test/Transforms/InstCombine/non-integral-pointers.ll
@@ -4,106 +4,97 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:4"
 target triple = "x86_64-unknown-linux-gnu"
 
-define i8 addrspace(4)* @f_0() {
+define ptr addrspace(4) @f_0() {
 ; CHECK-LABEL: @f_0(
-; CHECK-NEXT:    ret i8 addrspace(4)* getelementptr (i8, i8 addrspace(4)* null, i64 50)
+; CHECK-NEXT:    ret ptr addrspace(4) getelementptr (i8, ptr addrspace(4) null, i64 50)
 ;
-  %result = getelementptr i8, i8 addrspace(4)* null, i64 50
-  ret i8 addrspace(4)* %result
+  %result = getelementptr i8, ptr addrspace(4) null, i64 50
+  ret ptr addrspace(4) %result
 }
 
-define i8 addrspace(3)* @f_1() {
+define ptr addrspace(3) @f_1() {
 ; inttoptr is fine here since addrspace(3) is integral.
 ; CHECK-LABEL: @f_1(
-; CHECK-NEXT:    ret i8 addrspace(3)* inttoptr (i64 50 to i8 addrspace(3)*)
+; CHECK-NEXT:    ret ptr addrspace(3) inttoptr (i64 50 to ptr addrspace(3))
 ;
-  %result = getelementptr i8, i8 addrspace(3)* null, i64 50
-  ret i8 addrspace(3)* %result
+  %result = getelementptr i8, ptr addrspace(3) null, i64 50
+  ret ptr addrspace(3) %result
 }
 
-define void @f_2(i8 addrspace(4)** %ptr0, i8 addrspace(4)** %ptr1) {
+define void @f_2(ptr %ptr0, ptr %ptr1) {
 ; It is not okay to convert the load/store pair to load and store
 ; integers, since pointers in address space 4 are non-integral.
 ; CHECK-LABEL: @f_2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[VAL:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)** [[PTR0:%.*]], align 8
-; CHECK-NEXT:    store i8 addrspace(4)* [[VAL]], i8 addrspace(4)** [[PTR1:%.*]], align 8
+; CHECK-NEXT:    [[VAL:%.*]] = load ptr addrspace(4), ptr [[PTR0:%.*]], align 8
+; CHECK-NEXT:    store ptr addrspace(4) [[VAL]], ptr [[PTR1:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %val = load i8 addrspace(4)*, i8 addrspace(4)** %ptr0
-  store i8 addrspace(4)* %val, i8 addrspace(4)** %ptr1
+  %val = load ptr addrspace(4), ptr %ptr0
+  store ptr addrspace(4) %val, ptr %ptr1
   ret void
 }
 
-define void @f_3(i8 addrspace(3)** %ptr0, i8 addrspace(3)** %ptr1) {
+define void @f_3(ptr %ptr0, ptr %ptr1) {
 ; It *is* okay to convert the load/store pair to load and store
 ; integers, since pointers in address space 3 are integral.
 ; CHECK-LABEL: @f_3(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[VAL:%.*]] = load i8 addrspace(3)*, i8 addrspace(3)** [[PTR0:%.*]], align 8
-; CHECK-NEXT:    store i8 addrspace(3)* [[VAL]], i8 addrspace(3)** [[PTR1:%.*]], align 8
+; CHECK-NEXT:    [[VAL:%.*]] = load ptr addrspace(3), ptr [[PTR0:%.*]], align 8
+; CHECK-NEXT:    store ptr addrspace(3) [[VAL]], ptr [[PTR1:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %val = load i8 addrspace(3)*, i8 addrspace(3)** %ptr0
-  store i8 addrspace(3)* %val, i8 addrspace(3)** %ptr1
+  %val = load ptr addrspace(3), ptr %ptr0
+  store ptr addrspace(3) %val, ptr %ptr1
   ret void
 }
 
-define i64 @g(i8 addrspace(4)** %gp) {
+define i64 @g(ptr %gp) {
 ; CHECK-LABEL: @g(
-; CHECK-NEXT:    [[DOTPRE:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)** [[GP:%.*]], align 8
-; CHECK-NEXT:    [[V74:%.*]] = call i8 addrspace(4)* @alloc()
-; CHECK-NEXT:    [[V77:%.*]] = getelementptr i8, i8 addrspace(4)* [[V74]], i64 -8
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8 addrspace(4)* [[V77]] to i8 addrspace(4)* addrspace(4)*
-; CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast i8 addrspace(4)* addrspace(4)* [[TMP1]] to i8 addrspace(4)**
-; CHECK-NEXT:    store i8 addrspace(4)* [[DOTPRE]], i8 addrspace(4)** [[TMP2]], align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8 addrspace(4)* [[V77]] to i64 addrspace(4)*
-; CHECK-NEXT:    [[V80:%.*]] = addrspacecast i64 addrspace(4)* [[TMP3]] to i64*
-; CHECK-NEXT:    [[V81:%.*]] = load i64, i64* [[V80]], align 8
+; CHECK-NEXT:    [[DOTPRE:%.*]] = load ptr addrspace(4), ptr [[GP:%.*]], align 8
+; CHECK-NEXT:    [[V74:%.*]] = call ptr addrspace(4) @alloc()
+; CHECK-NEXT:    [[V75:%.*]] = addrspacecast ptr addrspace(4) [[V74]] to ptr
+; CHECK-NEXT:    [[V77:%.*]] = getelementptr ptr addrspace(4), ptr [[V75]], i64 -1
+; CHECK-NEXT:    store ptr addrspace(4) [[DOTPRE]], ptr [[V77]], align 8
+; CHECK-NEXT:    [[V81:%.*]] = load i64, ptr [[V77]], align 8
 ; CHECK-NEXT:    ret i64 [[V81]]
 ;
-  %.pre = load i8 addrspace(4)*, i8 addrspace(4)** %gp, align 8
-  %v74 = call i8 addrspace(4)* @alloc()
-  %v75 = addrspacecast i8 addrspace(4)* %v74 to i8*
-  %v76 = bitcast i8* %v75 to i8 addrspace(4)**
-  %v77 = getelementptr i8 addrspace(4)*, i8 addrspace(4)** %v76, i64 -1
-  store i8 addrspace(4)* %.pre, i8 addrspace(4)** %v77, align 8
-  %v80 = bitcast i8 addrspace(4)** %v77 to i64*
-  %v81 = load i64, i64* %v80, align 8
+  %.pre = load ptr addrspace(4), ptr %gp, align 8
+  %v74 = call ptr addrspace(4) @alloc()
+  %v75 = addrspacecast ptr addrspace(4) %v74 to ptr
+  %v77 = getelementptr ptr addrspace(4), ptr %v75, i64 -1
+  store ptr addrspace(4) %.pre, ptr %v77, align 8
+  %v81 = load i64, ptr %v77, align 8
   ret i64 %v81
 }
 
-define i64 @g2(i8* addrspace(4)* %gp) {
+define i64 @g2(ptr addrspace(4) %gp) {
 ; CHECK-LABEL: @g2(
-; CHECK-NEXT:    [[DOTPRE:%.*]] = load i8*, i8* addrspace(4)* [[GP:%.*]], align 8
-; CHECK-NEXT:    [[V74:%.*]] = call i8 addrspace(4)* @alloc()
-; CHECK-NEXT:    [[V77:%.*]] = getelementptr i8, i8 addrspace(4)* [[V74]], i64 -8
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8 addrspace(4)* [[V77]] to i8* addrspace(4)*
-; CHECK-NEXT:    store i8* [[DOTPRE]], i8* addrspace(4)* [[TMP1]], align 8
-; CHECK-NEXT:    [[V81_CAST:%.*]] = ptrtoint i8* [[DOTPRE]] to i64
+; CHECK-NEXT:    [[DOTPRE:%.*]] = load ptr, ptr addrspace(4) [[GP:%.*]], align 8
+; CHECK-NEXT:    [[V74:%.*]] = call ptr addrspace(4) @alloc()
+; CHECK-NEXT:    [[V77:%.*]] = getelementptr ptr, ptr addrspace(4) [[V74]], i64 -1
+; CHECK-NEXT:    store ptr [[DOTPRE]], ptr addrspace(4) [[V77]], align 8
+; CHECK-NEXT:    [[V81_CAST:%.*]] = ptrtoint ptr [[DOTPRE]] to i64
 ; CHECK-NEXT:    ret i64 [[V81_CAST]]
 ;
-  %.pre = load i8*, i8* addrspace(4)* %gp, align 8
-  %v74 = call i8 addrspace(4)* @alloc()
-  %v76 = bitcast i8 addrspace(4)* %v74 to i8* addrspace(4)*
-  %v77 = getelementptr i8*, i8* addrspace(4)* %v76, i64 -1
-  store i8* %.pre, i8* addrspace(4)* %v77, align 8
-  %v80 = bitcast i8* addrspace(4)* %v77 to i64 addrspace(4)*
-  %v81 = load i64, i64 addrspace(4)* %v80, align 8
+  %.pre = load ptr, ptr addrspace(4) %gp, align 8
+  %v74 = call ptr addrspace(4) @alloc()
+  %v77 = getelementptr ptr, ptr addrspace(4) %v74, i64 -1
+  store ptr %.pre, ptr addrspace(4) %v77, align 8
+  %v81 = load i64, ptr addrspace(4) %v77, align 8
   ret i64 %v81
 }
 
-declare i8 addrspace(4)* @alloc()
+declare ptr addrspace(4) @alloc()
 
-define i64 @f_4(i8 addrspace(4)* %v0) {
+define i64 @f_4(ptr addrspace(4) %v0) {
 ; CHECK-LABEL: @f_4(
-; CHECK-NEXT:    [[V6:%.*]] = call i64 bitcast (i64 (i64)* @f_5 to i64 (i8 addrspace(4)*)*)(i8 addrspace(4)* [[V0:%.*]])
+; CHECK-NEXT:    [[V6:%.*]] = call i64 @f_5(ptr addrspace(4) [[V0:%.*]])
 ; CHECK-NEXT:    ret i64 [[V6]]
 ;
-  %v5 = bitcast i64 (i64)* @f_5 to i64 (i8 addrspace(4)*)*
-  %v6 = call i64 %v5(i8 addrspace(4)* %v0)
+  %v6 = call i64 @f_5(ptr addrspace(4) %v0)
   ret i64 %v6
 }
 

diff  --git a/llvm/test/Transforms/InstCombine/pr27703.ll b/llvm/test/Transforms/InstCombine/pr27703.ll
index 38c45c8e4ce6..c7b7c66c8a74 100644
--- a/llvm/test/Transforms/InstCombine/pr27703.ll
+++ b/llvm/test/Transforms/InstCombine/pr27703.ll
@@ -5,16 +5,18 @@ bb:
   br label %bb6
 
 bb6:
-  %.0 = phi i8** [ undef, %bb ], [ %t2, %bb6 ]
-  %tmp = load i8*, i8** %.0, align 8
-  %bc = bitcast i8* %tmp to i8**
-  %t1 = load i8*, i8** %bc, align 8
-  %t2 = bitcast i8* %t1 to i8**
+  %.0 = phi ptr [ undef, %bb ], [ %t2, %bb6 ]
+  %tmp = load ptr, ptr %.0, align 8
+  %bc = bitcast ptr %tmp to ptr
+  %t1 = load ptr, ptr %bc, align 8
+  %t2 = bitcast ptr %t1 to ptr
   br label %bb6
 
 bb206:
   ret void
 ; CHECK: phi
-; CHECK: bitcast
-; CHECK: load
+; CHECK-NEXT: load
+; CHECK-NEXT: load
+; CHECK-NEXT: br
+
 }

diff  --git a/llvm/test/Transforms/InstCombine/pr33689_same_bitwidth.ll b/llvm/test/Transforms/InstCombine/pr33689_same_bitwidth.ll
index 3d0ae632e9fa..465c1db0e0ce 100644
--- a/llvm/test/Transforms/InstCombine/pr33689_same_bitwidth.ll
+++ b/llvm/test/Transforms/InstCombine/pr33689_same_bitwidth.ll
@@ -8,44 +8,40 @@ target datalayout = "p:16:16"
 %i64_t = type i64
 
 @a = external global i16
- at b = external global i16*
+ at b = external global ptr
 
 define void @f(i1 %cond) {
 ; CHECK-LABEL: @f(
 ; CHECK-NEXT:  bb0:
-; CHECK-NEXT:    [[T12:%.*]] = alloca [2 x i32], align 8
+; CHECK-NEXT:    [[T1:%.*]] = alloca i64, align 8
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    unreachable
 ; CHECK:       bb2:
-; CHECK-NEXT:    [[T12_SUB:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[T12]], i16 0, i16 0
-; CHECK-NEXT:    [[T9:%.*]] = load i16*, i16** @b, align 2
-; CHECK-NEXT:    store i16 0, i16* [[T9]], align 2
-; CHECK-NEXT:    [[T10:%.*]] = load i32, i32* [[T12_SUB]], align 8
+; CHECK-NEXT:    [[T9:%.*]] = load ptr, ptr @b, align 2
+; CHECK-NEXT:    store i16 0, ptr [[T9]], align 2
+; CHECK-NEXT:    [[T10:%.*]] = load i32, ptr [[T1]], align 8
 ; CHECK-NEXT:    [[T11:%.*]] = add i32 [[T10]], -1
-; CHECK-NEXT:    store i32 [[T11]], i32* [[T12_SUB]], align 8
+; CHECK-NEXT:    store i32 [[T11]], ptr [[T1]], align 8
 ; CHECK-NEXT:    ret void
 ;
 bb0:
   %t1 = alloca %i64_t
-  %t2 = bitcast %i64_t* %t1 to i32*
-  %useless3 = bitcast %i64_t* %t1 to i16*
-  %useless4 = getelementptr inbounds i16, i16* %useless3, i16 undef
-  %useless5 = bitcast i16* %useless4 to i32*
+  %useless4 = getelementptr inbounds i16, ptr %t1, i16 undef
   br i1 %cond, label %bb1, label %bb2
 
 bb1:                                              ; preds = %bb0
-  %useless6 = insertvalue [1 x i32*] undef, i32* %t2, 0
-  %useless7 = insertvalue [1 x i32*] %useless6, i32* null, 0
-  %t8 = ptrtoint i32* %t2 to i16
-  store i16 %t8, i16* @a
+  %useless6 = insertvalue [1 x ptr] undef, ptr %t1, 0
+  %useless7 = insertvalue [1 x ptr] %useless6, ptr null, 0
+  %t8 = ptrtoint ptr %t1 to i16
+  store i16 %t8, ptr @a
   unreachable
 
 bb2:                                              ; preds = %bb0
-  %t9 = load i16*, i16** @b
-  store i16 0, i16* %t9
-  %t10 = load i32, i32* %t2
+  %t9 = load ptr, ptr @b
+  store i16 0, ptr %t9
+  %t10 = load i32, ptr %t1
   %t11 = sub i32 %t10, 1
-  store i32 %t11, i32* %t2
+  store i32 %t11, ptr %t1
   ret void
 }

diff  --git a/llvm/test/Transforms/InstCombine/pr39908.ll b/llvm/test/Transforms/InstCombine/pr39908.ll
index f07e091a674b..0f0eae68439d 100644
--- a/llvm/test/Transforms/InstCombine/pr39908.ll
+++ b/llvm/test/Transforms/InstCombine/pr39908.ll
@@ -5,45 +5,45 @@ target datalayout = "p:32:32"
 
 %S = type { [2 x i32] }
 
-define i1 @test([0 x %S]* %p, i32 %n) {
+define i1 @test(ptr %p, i32 %n) {
 ; CHECK-LABEL: @test(
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[N:%.*]], 1
+; CHECK-NEXT:    [[END:%.*]] = getelementptr inbounds [0 x %S], ptr [[P:%.*]], i32 0, i32 [[N:%.*]], i32 0, i32 0
+; CHECK-NEXT:    [[LAST:%.*]] = getelementptr inbounds [[S:%.*]], ptr [[END]], i32 -1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[LAST]], [[P]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %start.cast = bitcast [0 x %S]* %p to %S*
-  %end = getelementptr inbounds [0 x %S], [0 x %S]* %p, i32 0, i32 %n, i32 0, i32 0
-  %end.cast = bitcast i32* %end to %S*
-  %last = getelementptr inbounds %S, %S* %end.cast, i32 -1
-  %cmp = icmp eq %S* %last, %start.cast
+  %end = getelementptr inbounds [0 x %S], ptr %p, i32 0, i32 %n, i32 0, i32 0
+  %last = getelementptr inbounds %S, ptr %end, i32 -1
+  %cmp = icmp eq ptr %last, %p
   ret i1 %cmp
 }
 
 ; Same test using 64-bit indices.
-define i1 @test64([0 x %S]* %p, i64 %n) {
+define i1 @test64(ptr %p, i64 %n) {
 ; CHECK-LABEL: @test64(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[N:%.*]] to i32
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP1]], 1
+; CHECK-NEXT:    [[END:%.*]] = getelementptr inbounds [0 x %S], ptr [[P:%.*]], i32 0, i32 [[TMP1]], i32 0, i32 0
+; CHECK-NEXT:    [[LAST:%.*]] = getelementptr inbounds [[S:%.*]], ptr [[END]], i32 -1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[LAST]], [[P]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %start.cast = bitcast [0 x %S]* %p to %S*
-  %end = getelementptr inbounds [0 x %S], [0 x %S]* %p, i64 0, i64 %n, i32 0, i64 0
-  %end.cast = bitcast i32* %end to %S*
-  %last = getelementptr inbounds %S, %S* %end.cast, i64 -1
-  %cmp = icmp eq %S* %last, %start.cast
+  %end = getelementptr inbounds [0 x %S], ptr %p, i64 0, i64 %n, i32 0, i64 0
+  %last = getelementptr inbounds %S, ptr %end, i64 -1
+  %cmp = icmp eq ptr %last, %p
   ret i1 %cmp
 }
 
 ; Here the offset overflows and is treated modulo 2^32. This is UB.
-define i1 @test64_overflow([0 x %S]* %p, i64 %n) {
+define i1 @test64_overflow(ptr %p, i64 %n) {
 ; CHECK-LABEL: @test64_overflow(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[N:%.*]] to i32
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP1]], 1
+; CHECK-NEXT:    [[END:%.*]] = getelementptr inbounds [0 x %S], ptr [[P:%.*]], i32 0, i32 [[TMP1]], i32 0, i32 0
+; CHECK-NEXT:    [[LAST:%.*]] = getelementptr inbounds [[S:%.*]], ptr [[END]], i32 -1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[LAST]], [[P]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %start.cast = bitcast [0 x %S]* %p to %S*
-  %end = getelementptr inbounds [0 x %S], [0 x %S]* %p, i64 0, i64 %n, i32 0, i64 8589934592
-  %end.cast = bitcast i32* %end to %S*
-  %last = getelementptr inbounds %S, %S* %end.cast, i64 -1
-  %cmp = icmp eq %S* %last, %start.cast
+  %end = getelementptr inbounds [0 x %S], ptr %p, i64 0, i64 %n, i32 0, i64 8589934592
+  %last = getelementptr inbounds %S, ptr %end, i64 -1
+  %cmp = icmp eq ptr %last, %p
   ret i1 %cmp
 }

diff  --git a/llvm/test/Transforms/InstCombine/pr44242.ll b/llvm/test/Transforms/InstCombine/pr44242.ll
index a6934d2f9140..e7b0f7da0347 100644
--- a/llvm/test/Transforms/InstCombine/pr44242.ll
+++ b/llvm/test/Transforms/InstCombine/pr44242.ll
@@ -89,7 +89,7 @@ define void @store_volatile(float %x) {
 ; CHECK-NEXT:    [[VAL_INCR_CASTED]] = bitcast float [[VAL_INCR]] to i32
 ; CHECK-NEXT:    br label [[LOOP_HEADER]]
 ; CHECK:       end:
-; CHECK-NEXT:    store volatile i32 [[VAL]], i32* @global, align 4
+; CHECK-NEXT:    store volatile i32 [[VAL]], ptr @global, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -104,7 +104,7 @@ loop:
   %val_incr_casted = bitcast float %val_incr to i32
   br label %loop_header
 end:
-  store volatile i32 %val, i32* @global
+  store volatile i32 %val, ptr @global
   ret void
 }
 
@@ -114,31 +114,29 @@ define void @store_address(i32 %x) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[LOOP_HEADER:%.*]]
 ; CHECK:       loop_header:
-; CHECK-NEXT:    [[VAL:%.*]] = phi i32* [ @global, [[ENTRY:%.*]] ], [ [[VAL_INCR1:%.*]], [[LOOP:%.*]] ]
+; CHECK-NEXT:    [[VAL:%.*]] = phi ptr [ @global, [[ENTRY:%.*]] ], [ [[VAL_INCR:%.*]], [[LOOP:%.*]] ]
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[X:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP]], label [[END:%.*]], label [[LOOP]]
 ; CHECK:       loop:
-; CHECK-NEXT:    [[VAL_INCR1]] = getelementptr i32, i32* [[VAL]], i64 1
+; CHECK-NEXT:    [[VAL_INCR]] = getelementptr float, ptr [[VAL]], i64 1
 ; CHECK-NEXT:    br label [[LOOP_HEADER]]
 ; CHECK:       end:
-; CHECK-NEXT:    store i32 0, i32* [[VAL]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[VAL]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
   br label %loop_header
 loop_header:
-  %val = phi i32* [ @global, %entry ], [ %val_incr_casted, %loop ]
+  %val = phi ptr [ @global, %entry ], [ %val_incr, %loop ]
   %i = phi i32 [ 0, %entry ], [ %i_incr, %loop ]
-  %val_casted = bitcast i32* %val to float*
   %cmp = icmp sgt i32 %i, %x
   br i1 %cmp, label %end, label %loop
 loop:
   %i_incr = add i32 %i, 0
-  %val_incr = getelementptr float, float* %val_casted, i32 1
-  %val_incr_casted = bitcast float* %val_incr to i32*
+  %val_incr = getelementptr float, ptr %val, i32 1
   br label %loop_header
 end:
-  store i32 0, i32* %val
+  store i32 0, ptr %val
   ret void
 }
 
@@ -162,7 +160,7 @@ define i32 @multiple_phis(float %x) {
 ; CHECK-NEXT:    br label [[LOOP_END]]
 ; CHECK:       loop_end:
 ; CHECK-NEXT:    [[VAL2]] = phi i32 [ [[VAL]], [[LOOP]] ], [ [[VAL_INCR_CASTED]], [[IF]] ]
-; CHECK-NEXT:    store volatile i32 [[VAL2]], i32* @global, align 4
+; CHECK-NEXT:    store volatile i32 [[VAL2]], ptr @global, align 4
 ; CHECK-NEXT:    br label [[LOOP_HEADER]]
 ; CHECK:       end:
 ; CHECK-NEXT:    ret i32 [[VAL]]
@@ -183,7 +181,7 @@ if:
   br label %loop_end
 loop_end:
   %val2 = phi i32 [ %val, %loop ], [ %val_incr_casted, %if ]
-  store volatile i32 %val2, i32* @global ; the incompatible use
+  store volatile i32 %val2, ptr @global ; the incompatible use
   br label %loop_header
 end:
   ret i32 %val

diff  --git a/llvm/test/Transforms/InstCombine/pr44245.ll b/llvm/test/Transforms/InstCombine/pr44245.ll
index 3d781a325065..c1a1153067d3 100644
--- a/llvm/test/Transforms/InstCombine/pr44245.ll
+++ b/llvm/test/Transforms/InstCombine/pr44245.ll
@@ -8,135 +8,134 @@ define void @test(i1 %c) {
 ; CHECK-NEXT:  bb16:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB17:%.*]], label [[BB24:%.*]]
 ; CHECK:       bb17:
-; CHECK-NEXT:    [[I:%.*]] = phi i8* [ [[TMP0:%.*]], [[BB47:%.*]] ], [ undef, [[BB16:%.*]] ]
-; CHECK-NEXT:    store i8* [[I]], i8** undef, align 8
+; CHECK-NEXT:    [[I:%.*]] = phi ptr [ [[TMP0:%.*]], [[BB47:%.*]] ], [ undef, [[BB16:%.*]] ]
+; CHECK-NEXT:    store ptr [[I]], ptr undef, align 8
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb24:
 ; CHECK-NEXT:    br i1 [[C]], label [[BB44:%.*]], label [[BB49:%.*]]
 ; CHECK:       bb44:
-; CHECK-NEXT:    [[TMP467:%.*]] = load i8*, i8** inttoptr (i64 16 to i8**), align 16
+; CHECK-NEXT:    [[TMP467:%.*]] = load ptr, ptr inttoptr (i64 16 to ptr), align 16
 ; CHECK-NEXT:    br label [[BB47]]
 ; CHECK:       bb47:
-; CHECK-NEXT:    [[TMP0]] = phi i8* [ [[TMP1:%.*]], [[BB150:%.*]] ], [ [[TMP1221:%.*]], [[BB119:%.*]] ], [ [[TMP1032:%.*]], [[BB101:%.*]] ], [ [[TMP933:%.*]], [[BB91:%.*]] ], [ [[TMP834:%.*]], [[BB81:%.*]] ], [ [[TMP705:%.*]], [[BB67:%.*]] ], [ [[TMP586:%.*]], [[BB56:%.*]] ], [ [[TMP467]], [[BB44]] ]
+; CHECK-NEXT:    [[TMP0]] = phi ptr [ [[TMP1:%.*]], [[BB150:%.*]] ], [ [[TMP1221:%.*]], [[BB119:%.*]] ], [ [[TMP1032:%.*]], [[BB101:%.*]] ], [ [[TMP933:%.*]], [[BB91:%.*]] ], [ [[TMP834:%.*]], [[BB81:%.*]] ], [ [[TMP705:%.*]], [[BB67:%.*]] ], [ [[TMP586:%.*]], [[BB56:%.*]] ], [ [[TMP467]], [[BB44]] ]
 ; CHECK-NEXT:    br label [[BB17]]
 ; CHECK:       bb49:
 ; CHECK-NEXT:    br i1 [[C]], label [[BB56]], label [[BB59:%.*]]
 ; CHECK:       bb56:
-; CHECK-NEXT:    [[TMP586]] = load i8*, i8** inttoptr (i64 16 to i8**), align 16
+; CHECK-NEXT:    [[TMP586]] = load ptr, ptr inttoptr (i64 16 to ptr), align 16
 ; CHECK-NEXT:    br label [[BB47]]
 ; CHECK:       bb59:
 ; CHECK-NEXT:    br i1 [[C]], label [[BB67]], label [[BB71:%.*]]
 ; CHECK:       bb67:
-; CHECK-NEXT:    [[TMP705]] = load i8*, i8** inttoptr (i64 16 to i8**), align 16
+; CHECK-NEXT:    [[TMP705]] = load ptr, ptr inttoptr (i64 16 to ptr), align 16
 ; CHECK-NEXT:    br label [[BB47]]
 ; CHECK:       bb71:
 ; CHECK-NEXT:    br i1 [[C]], label [[BB81]], label [[BB84:%.*]]
 ; CHECK:       bb81:
-; CHECK-NEXT:    [[TMP834]] = load i8*, i8** inttoptr (i64 16 to i8**), align 16
+; CHECK-NEXT:    [[TMP834]] = load ptr, ptr inttoptr (i64 16 to ptr), align 16
 ; CHECK-NEXT:    br label [[BB47]]
 ; CHECK:       bb84:
 ; CHECK-NEXT:    br i1 [[C]], label [[BB91]], label [[BB94:%.*]]
 ; CHECK:       bb91:
-; CHECK-NEXT:    [[TMP933]] = load i8*, i8** inttoptr (i64 16 to i8**), align 16
+; CHECK-NEXT:    [[TMP933]] = load ptr, ptr inttoptr (i64 16 to ptr), align 16
 ; CHECK-NEXT:    br label [[BB47]]
 ; CHECK:       bb94:
 ; CHECK-NEXT:    br i1 [[C]], label [[BB101]], label [[BB104:%.*]]
 ; CHECK:       bb101:
-; CHECK-NEXT:    [[TMP1032]] = load i8*, i8** inttoptr (i64 16 to i8**), align 16
+; CHECK-NEXT:    [[TMP1032]] = load ptr, ptr inttoptr (i64 16 to ptr), align 16
 ; CHECK-NEXT:    br label [[BB47]]
 ; CHECK:       bb104:
 ; CHECK-NEXT:    br i1 [[C]], label [[BB119]], label [[BB123:%.*]]
 ; CHECK:       bb119:
-; CHECK-NEXT:    [[TMP1221]] = load i8*, i8** inttoptr (i64 16 to i8**), align 16
+; CHECK-NEXT:    [[TMP1221]] = load ptr, ptr inttoptr (i64 16 to ptr), align 16
 ; CHECK-NEXT:    br label [[BB47]]
 ; CHECK:       bb123:
 ; CHECK-NEXT:    br i1 [[C]], label [[BB147:%.*]], label [[BB152:%.*]]
 ; CHECK:       bb147:
-; CHECK-NEXT:    [[TMP1499:%.*]] = load i8*, i8** inttoptr (i64 16 to i8**), align 16
+; CHECK-NEXT:    [[TMP1499:%.*]] = load ptr, ptr inttoptr (i64 16 to ptr), align 16
 ; CHECK-NEXT:    br label [[BB150]]
 ; CHECK:       bb150:
-; CHECK-NEXT:    [[TMP1]] = phi i8* [ [[TMP1848:%.*]], [[BB152]] ], [ [[TMP1499]], [[BB147]] ]
+; CHECK-NEXT:    [[TMP1]] = phi ptr [ [[TMP1848:%.*]], [[BB152]] ], [ [[TMP1499]], [[BB147]] ]
 ; CHECK-NEXT:    br label [[BB47]]
 ; CHECK:       bb152:
-; CHECK-NEXT:    [[TMP1848]] = load i8*, i8** inttoptr (i64 16 to i8**), align 16
-; CHECK-NEXT:    store i1 true, i1* poison, align 1
+; CHECK-NEXT:    [[TMP1848]] = load ptr, ptr inttoptr (i64 16 to ptr), align 16
+; CHECK-NEXT:    store i1 true, ptr poison, align 1
 ; CHECK-NEXT:    br label [[BB150]]
 ;
 bb16:
   br i1 %c, label %bb17, label %bb24
 
 bb17:                                             ; preds = %bb47, %bb16
-  %i = phi i8* [ %i1, %bb47 ], [ undef, %bb16 ]
-  store i8* %i, i8** undef, align 8
+  %i = phi ptr [ %.in1, %bb47 ], [ undef, %bb16 ]
+  store ptr %i, ptr undef, align 8
   ret void
 
 bb24:                                             ; preds = %bb16
   br i1 %c, label %bb44, label %bb49
 
 bb44:                                             ; preds = %bb24
-  %tmp46 = load i64*, i64** inttoptr (i64 16 to i64**), align 16
+  %tmp46 = load ptr, ptr inttoptr (i64 16 to ptr), align 16
   br label %bb47
 
 bb47:                                             ; preds = %bb150, %bb119, %bb101, %bb91, %bb81, %bb67, %bb56, %bb44
-  %.in1 = phi i64* [ %.in, %bb150 ], [ %tmp122, %bb119 ], [ %tmp103, %bb101 ], [ %tmp93, %bb91 ], [ %tmp83, %bb81 ], [ %tmp70, %bb67 ], [ %tmp58, %bb56 ], [ %tmp46, %bb44 ]
-  %i1 = bitcast i64* %.in1 to i8*
+  %.in1 = phi ptr [ %.in, %bb150 ], [ %tmp122, %bb119 ], [ %tmp103, %bb101 ], [ %tmp93, %bb91 ], [ %tmp83, %bb81 ], [ %tmp70, %bb67 ], [ %tmp58, %bb56 ], [ %tmp46, %bb44 ]
   br label %bb17
 
 bb49:                                             ; preds = %bb24
   br i1 %c, label %bb56, label %bb59
 
 bb56:                                             ; preds = %bb49
-  %tmp58 = load i64*, i64** inttoptr (i64 16 to i64**), align 16
+  %tmp58 = load ptr, ptr inttoptr (i64 16 to ptr), align 16
   br label %bb47
 
 bb59:                                             ; preds = %bb49
   br i1 %c, label %bb67, label %bb71
 
 bb67:                                             ; preds = %bb59
-  %tmp70 = load i64*, i64** inttoptr (i64 16 to i64**), align 16
+  %tmp70 = load ptr, ptr inttoptr (i64 16 to ptr), align 16
   br label %bb47
 
 bb71:                                             ; preds = %bb59
   br i1 %c, label %bb81, label %bb84
 
 bb81:                                             ; preds = %bb71
-  %tmp83 = load i64*, i64** inttoptr (i64 16 to i64**), align 16
+  %tmp83 = load ptr, ptr inttoptr (i64 16 to ptr), align 16
   br label %bb47
 
 bb84:                                             ; preds = %bb71
   br i1 %c, label %bb91, label %bb94
 
 bb91:                                             ; preds = %bb84
-  %tmp93 = load i64*, i64** inttoptr (i64 16 to i64**), align 16
+  %tmp93 = load ptr, ptr inttoptr (i64 16 to ptr), align 16
   br label %bb47
 
 bb94:                                             ; preds = %bb84
   br i1 %c, label %bb101, label %bb104
 
 bb101:                                            ; preds = %bb94
-  %tmp103 = load i64*, i64** inttoptr (i64 16 to i64**), align 16
+  %tmp103 = load ptr, ptr inttoptr (i64 16 to ptr), align 16
   br label %bb47
 
 bb104:                                            ; preds = %bb94
   br i1 %c, label %bb119, label %bb123
 
 bb119:                                            ; preds = %bb104
-  %tmp122 = load i64*, i64** inttoptr (i64 16 to i64**), align 16
+  %tmp122 = load ptr, ptr inttoptr (i64 16 to ptr), align 16
   br label %bb47
 
 bb123:                                            ; preds = %bb104
   br i1 %c, label %bb147, label %bb152
 
 bb147:                                            ; preds = %bb123
-  %tmp149 = load i64*, i64** inttoptr (i64 16 to i64**), align 16
+  %tmp149 = load ptr, ptr inttoptr (i64 16 to ptr), align 16
   br label %bb150
 
 bb150:                                            ; preds = %bb152, %bb147
-  %.in = phi i64* [ %tmp184, %bb152 ], [ %tmp149, %bb147 ]
+  %.in = phi ptr [ %tmp184, %bb152 ], [ %tmp149, %bb147 ]
   br label %bb47
 
 bb152:                                            ; preds = %bb123
-  %tmp184 = load i64*, i64** inttoptr (i64 16 to i64**), align 16
+  %tmp184 = load ptr, ptr inttoptr (i64 16 to ptr), align 16
   call void undef()
   br label %bb150
 }
@@ -159,7 +158,7 @@ define void @test_2(i1 %c) local_unnamed_addr {
 ; CHECK:       cond.true133:
 ; CHECK-NEXT:    br label [[COND_END144:%.*]]
 ; CHECK:       cond.false138:
-; CHECK-NEXT:    store %type_2* poison, %type_2** null, align 4294967296
+; CHECK-NEXT:    store ptr poison, ptr null, align 4294967296
 ; CHECK-NEXT:    br label [[COND_END144]]
 ; CHECK:       cond.end144:
 ; CHECK-NEXT:    br label [[WHILE_COND]]
@@ -168,24 +167,21 @@ entry:
   br label %while.cond
 
 while.cond:                                       ; preds = %cond.end144, %entry
-  %link.0 = phi %type_2* [ undef, %entry ], [ %cond145, %cond.end144 ]
-  %os115 = bitcast %type_2* %link.0 to %type_3*
-  %ou116 = getelementptr inbounds %type_3, %type_3* %os115, i32 0
-  %os1117 = bitcast %type_3* %ou116 to %type_1*
+  %link.0 = phi ptr [ undef, %entry ], [ %cond145, %cond.end144 ]
   br label %for.cond
 
 for.cond:                                         ; preds = %while.cond
   br i1 %c, label %cond.true133, label %cond.false138
 
 cond.true133:                                     ; preds = %for.cond
-  %i = load %type_2*, %type_2** undef, align 8
+  %i = load ptr, ptr undef, align 8
   br label %cond.end144
 
 cond.false138:                                    ; preds = %for.cond
-  %i1 = load %type_2*, %type_2** undef, align 8
+  %i1 = load ptr, ptr undef, align 8
   br label %cond.end144
 
 cond.end144:                                      ; preds = %cond.false138, %cond.true133
-  %cond145 = phi %type_2* [ %i, %cond.true133 ], [ %i1, %cond.false138 ]
+  %cond145 = phi ptr [ %i, %cond.true133 ], [ %i1, %cond.false138 ]
   br label %while.cond
 }

diff  --git a/llvm/test/Transforms/InstCombine/pr58901.ll b/llvm/test/Transforms/InstCombine/pr58901.ll
index cab720185777..f94c3f131b2b 100644
--- a/llvm/test/Transforms/InstCombine/pr58901.ll
+++ b/llvm/test/Transforms/InstCombine/pr58901.ll
@@ -1,23 +1,23 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 
-define i32* @f1([6 x i32]* %arg, i64 %arg1) {
+define ptr @f1(ptr %arg, i64 %arg1) {
 ; CHECK-LABEL: @f1(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [6 x i32], [6 x i32]* [[ARG:%.*]], i64 3, i64 [[ARG1:%.*]]
-; CHECK-NEXT:    ret i32* [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [6 x i32], ptr [[ARG:%.*]], i64 3, i64 [[ARG1:%.*]]
+; CHECK-NEXT:    ret ptr [[TMP1]]
 ;
-  %1 = getelementptr [6 x i32], [6 x i32]* %arg, i64 3
-  %2 = getelementptr [6 x i32], [6 x i32]* %1, i64 0, i64 %arg1
-  ret i32* %2
+  %1 = getelementptr [6 x i32], ptr %arg, i64 3
+  %2 = getelementptr [6 x i32], ptr %1, i64 0, i64 %arg1
+  ret ptr %2
 }
 
-define i32* @f2([6 x i32]* %arg, i64 %arg1) {
+define ptr @f2(ptr %arg, i64 %arg1) {
 ; CHECK-LABEL: @f2(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [6 x i32], [6 x i32]* [[ARG:%.*]], i64 3
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr [6 x i32], [6 x i32]* [[TMP1]], i64 [[ARG1:%.*]], i64 [[ARG1]]
-; CHECK-NEXT:    ret i32* [[TMP2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [6 x i32], ptr [[ARG:%.*]], i64 3
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr [6 x i32], ptr [[TMP1]], i64 [[ARG1:%.*]], i64 [[ARG1]]
+; CHECK-NEXT:    ret ptr [[TMP2]]
 ;
-  %1 = getelementptr [6 x i32], [6 x i32]* %arg, i64 3
-  %2 = getelementptr [6 x i32], [6 x i32]* %1, i64 %arg1, i64 %arg1
-  ret i32* %2
+  %1 = getelementptr [6 x i32], ptr %arg, i64 3
+  %2 = getelementptr [6 x i32], ptr %1, i64 %arg1, i64 %arg1
+  ret ptr %2
 }

diff  --git a/llvm/test/Transforms/InstCombine/scalable-cast-of-alloc.ll b/llvm/test/Transforms/InstCombine/scalable-cast-of-alloc.ll
index e3c9760ebd53..ef0734b883f8 100644
--- a/llvm/test/Transforms/InstCombine/scalable-cast-of-alloc.ll
+++ b/llvm/test/Transforms/InstCombine/scalable-cast-of-alloc.ll
@@ -1,138 +1,125 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -passes=instcombine -S < %s | FileCheck %s
 
-define void @fixed_array16i32_to_scalable4i32(<vscale x 4 x i32>* %out) {
+define void @fixed_array16i32_to_scalable4i32(ptr %out) {
 ; CHECK-LABEL: @fixed_array16i32_to_scalable4i32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP:%.*]] = alloca [16 x i32], align 16
-; CHECK-NEXT:    [[CAST:%.*]] = bitcast [16 x i32]* [[TMP]] to <vscale x 4 x i32>*
-; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[CAST]], align 16
-; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* [[CAST]], align 16
-; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], <vscale x 4 x i32>* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, ptr [[TMP]], align 16
+; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, ptr [[TMP]], align 16
+; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %tmp = alloca [16 x i32], align 16
-  %cast = bitcast [16 x i32]* %tmp to <vscale x 4 x i32>*
-  store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %cast, align 16
-  %reload = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %cast, align 16
-  store <vscale x 4 x i32> %reload, <vscale x 4 x i32>* %out, align 16
+  store volatile <vscale x 4 x i32> zeroinitializer, ptr %tmp, align 16
+  %reload = load volatile <vscale x 4 x i32>, ptr %tmp, align 16
+  store <vscale x 4 x i32> %reload, ptr %out, align 16
   ret void
 }
 
-define void @scalable4i32_to_fixed16i32(<16 x i32>* %out) {
+define void @scalable4i32_to_fixed16i32(ptr %out) {
 ; CHECK-LABEL: @scalable4i32_to_fixed16i32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 4 x i32>, align 64
-; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 4 x i32>* [[TMP]] to <16 x i32>*
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* [[CAST]], align 64
-; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, <16 x i32>* [[CAST]], align 64
-; CHECK-NEXT:    store <16 x i32> [[RELOAD]], <16 x i32>* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP]], align 64
+; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, ptr [[TMP]], align 64
+; CHECK-NEXT:    store <16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %tmp = alloca <vscale x 4 x i32>, align 16
-  %cast = bitcast <vscale x 4 x i32>* %tmp to <16 x i32>*
-  store <16 x i32> zeroinitializer, <16 x i32>* %cast, align 16
-  %reload = load volatile <16 x i32>, <16 x i32>* %cast, align 16
-  store <16 x i32> %reload, <16 x i32>* %out, align 16
+  store <16 x i32> zeroinitializer, ptr %tmp, align 16
+  %reload = load volatile <16 x i32>, ptr %tmp, align 16
+  store <16 x i32> %reload, ptr %out, align 16
   ret void
 }
 
-define void @fixed16i32_to_scalable4i32(<vscale x 4 x i32>* %out) {
+define void @fixed16i32_to_scalable4i32(ptr %out) {
 ; CHECK-LABEL: @fixed16i32_to_scalable4i32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP:%.*]] = alloca <16 x i32>, align 16
-; CHECK-NEXT:    [[CAST:%.*]] = bitcast <16 x i32>* [[TMP]] to <vscale x 4 x i32>*
-; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[CAST]], align 16
-; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* [[CAST]], align 16
-; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], <vscale x 4 x i32>* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, ptr [[TMP]], align 16
+; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, ptr [[TMP]], align 16
+; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %tmp = alloca <16 x i32>, align 16
-  %cast = bitcast <16 x i32>* %tmp to <vscale x 4 x i32>*
-  store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %cast, align 16
-  %reload = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %cast, align 16
-  store <vscale x 4 x i32> %reload, <vscale x 4 x i32>* %out, align 16
+  store volatile <vscale x 4 x i32> zeroinitializer, ptr %tmp, align 16
+  %reload = load volatile <vscale x 4 x i32>, ptr %tmp, align 16
+  store <vscale x 4 x i32> %reload, ptr %out, align 16
   ret void
 }
 
-define void @scalable16i32_to_fixed16i32(<16 x i32>* %out) {
+define void @scalable16i32_to_fixed16i32(ptr %out) {
 ; CHECK-LABEL: @scalable16i32_to_fixed16i32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 64
-; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 16 x i32>* [[TMP]] to <16 x i32>*
-; CHECK-NEXT:    store volatile <16 x i32> zeroinitializer, <16 x i32>* [[CAST]], align 64
-; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, <16 x i32>* [[CAST]], align 64
-; CHECK-NEXT:    store <16 x i32> [[RELOAD]], <16 x i32>* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store volatile <16 x i32> zeroinitializer, ptr [[TMP]], align 64
+; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, ptr [[TMP]], align 64
+; CHECK-NEXT:    store <16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %tmp = alloca <vscale x 16 x i32>, align 16
-  %cast = bitcast <vscale x 16 x i32>* %tmp to <16 x i32>*
-  store volatile <16 x i32> zeroinitializer, <16 x i32>* %cast, align 16
-  %reload = load volatile <16 x i32>, <16 x i32>* %cast, align 16
-  store <16 x i32> %reload, <16 x i32>* %out, align 16
+  store volatile <16 x i32> zeroinitializer, ptr %tmp, align 16
+  %reload = load volatile <16 x i32>, ptr %tmp, align 16
+  store <16 x i32> %reload, ptr %out, align 16
   ret void
 }
 
-define void @scalable32i32_to_scalable16i32(<vscale x 16 x i32>* %out) {
+define void @scalable32i32_to_scalable16i32(ptr %out) {
 ; CHECK-LABEL: @scalable32i32_to_scalable16i32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i32>, align 64
-; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 32 x i32>* [[TMP]] to <vscale x 16 x i32>*
-; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[CAST]], align 64
-; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[CAST]], align 64
-; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 64
+; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 64
+; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %tmp = alloca <vscale x 32 x i32>, align 16
-  %cast = bitcast <vscale x 32 x i32>* %tmp to <vscale x 16 x i32>*
-  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
-  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
-  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
+  store volatile <vscale x 16 x i32> zeroinitializer, ptr %tmp, align 16
+  %reload = load volatile <vscale x 16 x i32>, ptr %tmp, align 16
+  store <vscale x 16 x i32> %reload, ptr %out, align 16
   ret void
 }
 
-define void @scalable32i16_to_scalable16i32(<vscale x 16 x i32>* %out) {
+define void @scalable32i16_to_scalable16i32(ptr %out) {
 ; CHECK-LABEL: @scalable32i16_to_scalable16i32(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 64
-; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[TMP]], align 64
-; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[TMP]], align 64
-; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
+; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 64
+; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 64
+; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 64
+; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %tmp = alloca <vscale x 32 x i16>, align 16
-  %cast = bitcast <vscale x 32 x i16>* %tmp to <vscale x 16 x i32>*
-  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
-  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
-  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
+  store volatile <vscale x 16 x i32> zeroinitializer, ptr %tmp, align 16
+  %reload = load volatile <vscale x 16 x i32>, ptr %tmp, align 16
+  store <vscale x 16 x i32> %reload, ptr %out, align 16
   ret void
 }
 
-define void @scalable32i16_to_scalable16i32_multiuse(<vscale x 16 x i32>* %out, <vscale x 32 x i16>* %out2) {
+define void @scalable32i16_to_scalable16i32_multiuse(ptr %out, ptr %out2) {
 ; CHECK-LABEL: @scalable32i16_to_scalable16i32_multiuse(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 64
-; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 32 x i16>* [[TMP]] to <vscale x 16 x i32>*
-; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[CAST]], align 64
-; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[CAST]], align 64
-; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
-; CHECK-NEXT:    [[RELOAD2:%.*]] = load volatile <vscale x 32 x i16>, <vscale x 32 x i16>* [[TMP]], align 64
-; CHECK-NEXT:    store <vscale x 32 x i16> [[RELOAD2]], <vscale x 32 x i16>* [[OUT2:%.*]], align 16
+; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 64
+; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 64
+; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT:    [[RELOAD2:%.*]] = load volatile <vscale x 32 x i16>, ptr [[TMP]], align 64
+; CHECK-NEXT:    store <vscale x 32 x i16> [[RELOAD2]], ptr [[OUT2:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %tmp = alloca <vscale x 32 x i16>, align 16
-  %cast = bitcast <vscale x 32 x i16>* %tmp to <vscale x 16 x i32>*
-  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
-  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
-  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
-  %reload2 = load volatile <vscale x 32 x i16>, <vscale x 32 x i16>* %tmp, align 16
-  store <vscale x 32 x i16> %reload2, <vscale x 32 x i16>* %out2, align 16
+  store volatile <vscale x 16 x i32> zeroinitializer, ptr %tmp, align 16
+  %reload = load volatile <vscale x 16 x i32>, ptr %tmp, align 16
+  store <vscale x 16 x i32> %reload, ptr %out, align 16
+  %reload2 = load volatile <vscale x 32 x i16>, ptr %tmp, align 16
+  store <vscale x 32 x i16> %reload2, ptr %out2, align 16
   ret void
 }

diff  --git a/llvm/test/Transforms/InstCombine/select-cmp-br.ll b/llvm/test/Transforms/InstCombine/select-cmp-br.ll
index 222e7a3e69da..92dc562281b8 100644
--- a/llvm/test/Transforms/InstCombine/select-cmp-br.ll
+++ b/llvm/test/Transforms/InstCombine/select-cmp-br.ll
@@ -2,239 +2,205 @@
 ; Replace a 'select' with 'or' in 'select - cmp [eq|ne] - br' sequence
 ; RUN: opt -passes=instcombine -S < %s | FileCheck %s
 
-%struct.S = type { i64*, i32, i32 }
+%struct.S = type { ptr, i32, i32 }
 %C = type <{ %struct.S }>
 
-declare void @bar(%struct.S*)
+declare void @bar(ptr)
 declare void @foobar()
 
-define void @test1(%C* %arg) {
+define void @test1(ptr %arg) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds [[C:%.*]], %C* [[ARG:%.*]], i64 0, i32 0, i32 0
-; CHECK-NEXT:    [[M:%.*]] = load i64*, i64** [[TMP]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 1, i32 0, i32 0
-; CHECK-NEXT:    [[N:%.*]] = load i64*, i64** [[TMP1]], align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64* [[M]], [[N]]
-; CHECK-NEXT:    [[TMP71:%.*]] = icmp eq %C* [[ARG]], null
-; CHECK-NEXT:    [[TMP7:%.*]] = select i1 [[TMP5]], i1 true, i1 [[TMP71]]
-; CHECK-NEXT:    br i1 [[TMP7]], label [[BB10:%.*]], label [[BB8:%.*]]
+; CHECK-NEXT:    [[M:%.*]] = load ptr, ptr [[ARG:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[C:%.*]], ptr [[ARG]], i64 1, i32 0, i32 0
+; CHECK-NEXT:    [[N:%.*]] = load ptr, ptr [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP5_NOT:%.*]] = icmp eq ptr [[M]], [[N]]
+; CHECK-NEXT:    br i1 [[TMP5_NOT]], label [[BB8:%.*]], label [[BB10:%.*]]
 ; CHECK:       bb:
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb8:
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 0, i32 0
-; CHECK-NEXT:    tail call void @bar(%struct.S* [[TMP9]])
+; CHECK-NEXT:    tail call void @bar(ptr nonnull [[ARG]])
 ; CHECK-NEXT:    br label [[BB:%.*]]
 ; CHECK:       bb10:
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[M]], i64 9
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64* [[TMP2]] to i64 (%C*)**
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64 (%C*)*, i64 (%C*)** [[TMP3]], align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = tail call i64 [[TMP4]](%C* [[ARG]])
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[M]], i64 9
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = tail call i64 [[TMP4]](ptr nonnull [[ARG]])
 ; CHECK-NEXT:    br label [[BB]]
 ;
 entry:
-  %tmp = getelementptr inbounds %C, %C* %arg, i64 0, i32 0, i32 0
-  %m = load i64*, i64** %tmp, align 8
-  %tmp1 = getelementptr inbounds %C, %C* %arg, i64 1, i32 0, i32 0
-  %n = load i64*, i64** %tmp1, align 8
-  %tmp2 = getelementptr inbounds i64, i64* %m, i64 9
-  %tmp3 = bitcast i64* %tmp2 to i64 (%C*)**
-  %tmp4 = load i64 (%C*)*, i64 (%C*)** %tmp3, align 8
-  %tmp5 = icmp eq i64* %m, %n
-  %tmp6 = select i1 %tmp5, %C* %arg, %C* null
-  %tmp7 = icmp eq %C* %tmp6, null
+  %m = load ptr, ptr %arg, align 8
+  %tmp1 = getelementptr inbounds %C, ptr %arg, i64 1, i32 0, i32 0
+  %n = load ptr, ptr %tmp1, align 8
+  %tmp2 = getelementptr inbounds i64, ptr %m, i64 9
+  %tmp4 = load ptr, ptr %tmp2, align 8
+  %tmp5 = icmp eq ptr %m, %n
+  %tmp6 = select i1 %tmp5, ptr %arg, ptr null
+  %tmp7 = icmp eq ptr %tmp6, null
   br i1 %tmp7, label %bb10, label %bb8
 
 bb:                                               ; preds = %bb10, %bb8
   ret void
 
 bb8:                                              ; preds = %entry
-  %tmp9 = getelementptr inbounds %C, %C* %tmp6, i64 0, i32 0
-  tail call void @bar(%struct.S* %tmp9)
+  tail call void @bar(ptr %tmp6)
   br label %bb
 
 bb10:                                             ; preds = %entry
-  %tmp11 = tail call i64 %tmp4(%C* %arg)
+  %tmp11 = tail call i64 %tmp4(ptr %arg)
   br label %bb
 }
 
-define void @test2(%C* %arg) {
+define void @test2(ptr %arg) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds [[C:%.*]], %C* [[ARG:%.*]], i64 0, i32 0, i32 0
-; CHECK-NEXT:    [[M:%.*]] = load i64*, i64** [[TMP]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 1, i32 0, i32 0
-; CHECK-NEXT:    [[N:%.*]] = load i64*, i64** [[TMP1]], align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i64* [[M]], [[N]]
-; CHECK-NEXT:    [[TMP71:%.*]] = icmp eq %C* [[ARG]], null
-; CHECK-NEXT:    [[TMP7:%.*]] = select i1 [[TMP5]], i1 true, i1 [[TMP71]]
-; CHECK-NEXT:    br i1 [[TMP7]], label [[BB10:%.*]], label [[BB8:%.*]]
+; CHECK-NEXT:    [[M:%.*]] = load ptr, ptr [[ARG:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[C:%.*]], ptr [[ARG]], i64 1, i32 0, i32 0
+; CHECK-NEXT:    [[N:%.*]] = load ptr, ptr [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq ptr [[M]], [[N]]
+; CHECK-NEXT:    br i1 [[TMP5]], label [[BB10:%.*]], label [[BB8:%.*]]
 ; CHECK:       bb:
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb8:
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 0, i32 0
-; CHECK-NEXT:    tail call void @bar(%struct.S* [[TMP9]])
+; CHECK-NEXT:    tail call void @bar(ptr nonnull [[ARG]])
 ; CHECK-NEXT:    br label [[BB:%.*]]
 ; CHECK:       bb10:
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[M]], i64 9
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64* [[TMP2]] to i64 (%C*)**
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64 (%C*)*, i64 (%C*)** [[TMP3]], align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = tail call i64 [[TMP4]](%C* [[ARG]])
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[M]], i64 9
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = tail call i64 [[TMP4]](ptr nonnull [[ARG]])
 ; CHECK-NEXT:    br label [[BB]]
 ;
 entry:
-  %tmp = getelementptr inbounds %C, %C* %arg, i64 0, i32 0, i32 0
-  %m = load i64*, i64** %tmp, align 8
-  %tmp1 = getelementptr inbounds %C, %C* %arg, i64 1, i32 0, i32 0
-  %n = load i64*, i64** %tmp1, align 8
-  %tmp2 = getelementptr inbounds i64, i64* %m, i64 9
-  %tmp3 = bitcast i64* %tmp2 to i64 (%C*)**
-  %tmp4 = load i64 (%C*)*, i64 (%C*)** %tmp3, align 8
-  %tmp5 = icmp eq i64* %m, %n
-  %tmp6 = select i1 %tmp5, %C* null, %C* %arg
-  %tmp7 = icmp eq %C* %tmp6, null
+  %m = load ptr, ptr %arg, align 8
+  %tmp1 = getelementptr inbounds %C, ptr %arg, i64 1, i32 0, i32 0
+  %n = load ptr, ptr %tmp1, align 8
+  %tmp2 = getelementptr inbounds i64, ptr %m, i64 9
+  %tmp4 = load ptr, ptr %tmp2, align 8
+  %tmp5 = icmp eq ptr %m, %n
+  %tmp6 = select i1 %tmp5, ptr null, ptr %arg
+  %tmp7 = icmp eq ptr %tmp6, null
   br i1 %tmp7, label %bb10, label %bb8
 
 bb:                                               ; preds = %bb10, %bb8
   ret void
 
 bb8:                                              ; preds = %entry
-  %tmp9 = getelementptr inbounds %C, %C* %tmp6, i64 0, i32 0
-  tail call void @bar(%struct.S* %tmp9)
+  tail call void @bar(ptr %tmp6)
   br label %bb
 
 bb10:                                             ; preds = %entry
-  %tmp11 = tail call i64 %tmp4(%C* %arg)
+  %tmp11 = tail call i64 %tmp4(ptr %arg)
   br label %bb
 }
 
-define void @test3(%C* %arg) {
+define void @test3(ptr %arg) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds [[C:%.*]], %C* [[ARG:%.*]], i64 0, i32 0, i32 0
-; CHECK-NEXT:    [[M:%.*]] = load i64*, i64** [[TMP]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 1, i32 0, i32 0
-; CHECK-NEXT:    [[N:%.*]] = load i64*, i64** [[TMP1]], align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64* [[M]], [[N]]
-; CHECK-NEXT:    [[TMP7_NOT1:%.*]] = icmp eq %C* [[ARG]], null
-; CHECK-NEXT:    [[TMP7_NOT:%.*]] = select i1 [[TMP5]], i1 true, i1 [[TMP7_NOT1]]
-; CHECK-NEXT:    br i1 [[TMP7_NOT]], label [[BB10:%.*]], label [[BB8:%.*]]
+; CHECK-NEXT:    [[M:%.*]] = load ptr, ptr [[ARG:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[C:%.*]], ptr [[ARG]], i64 1, i32 0, i32 0
+; CHECK-NEXT:    [[N:%.*]] = load ptr, ptr [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq ptr [[M]], [[N]]
+; CHECK-NEXT:    br i1 [[TMP5]], label [[BB8:%.*]], label [[BB10:%.*]]
 ; CHECK:       bb:
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb8:
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 0, i32 0
-; CHECK-NEXT:    tail call void @bar(%struct.S* [[TMP9]])
+; CHECK-NEXT:    tail call void @bar(ptr nonnull [[ARG]])
 ; CHECK-NEXT:    br label [[BB:%.*]]
 ; CHECK:       bb10:
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[M]], i64 9
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64* [[TMP2]] to i64 (%C*)**
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64 (%C*)*, i64 (%C*)** [[TMP3]], align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = tail call i64 [[TMP4]](%C* [[ARG]])
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[M]], i64 9
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = tail call i64 [[TMP4]](ptr nonnull [[ARG]])
 ; CHECK-NEXT:    br label [[BB]]
 ;
 entry:
-  %tmp = getelementptr inbounds %C, %C* %arg, i64 0, i32 0, i32 0
-  %m = load i64*, i64** %tmp, align 8
-  %tmp1 = getelementptr inbounds %C, %C* %arg, i64 1, i32 0, i32 0
-  %n = load i64*, i64** %tmp1, align 8
-  %tmp2 = getelementptr inbounds i64, i64* %m, i64 9
-  %tmp3 = bitcast i64* %tmp2 to i64 (%C*)**
-  %tmp4 = load i64 (%C*)*, i64 (%C*)** %tmp3, align 8
-  %tmp5 = icmp eq i64* %m, %n
-  %tmp6 = select i1 %tmp5, %C* %arg, %C* null
-  %tmp7 = icmp ne %C* %tmp6, null
+  %m = load ptr, ptr %arg, align 8
+  %tmp1 = getelementptr inbounds %C, ptr %arg, i64 1, i32 0, i32 0
+  %n = load ptr, ptr %tmp1, align 8
+  %tmp2 = getelementptr inbounds i64, ptr %m, i64 9
+  %tmp4 = load ptr, ptr %tmp2, align 8
+  %tmp5 = icmp eq ptr %m, %n
+  %tmp6 = select i1 %tmp5, ptr %arg, ptr null
+  %tmp7 = icmp ne ptr %tmp6, null
   br i1 %tmp7, label %bb8, label %bb10
 
 bb:                                               ; preds = %bb10, %bb8
   ret void
 
 bb8:                                              ; preds = %entry
-  %tmp9 = getelementptr inbounds %C, %C* %tmp6, i64 0, i32 0
-  tail call void @bar(%struct.S* %tmp9)
+  tail call void @bar(ptr %tmp6)
   br label %bb
 
 bb10:                                             ; preds = %entry
-  %tmp11 = tail call i64 %tmp4(%C* %arg)
+  %tmp11 = tail call i64 %tmp4(ptr %arg)
   br label %bb
 }
 
-define void @test4(%C* %arg) {
+define void @test4(ptr %arg) {
 ; CHECK-LABEL: @test4(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds [[C:%.*]], %C* [[ARG:%.*]], i64 0, i32 0, i32 0
-; CHECK-NEXT:    [[M:%.*]] = load i64*, i64** [[TMP]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 1, i32 0, i32 0
-; CHECK-NEXT:    [[N:%.*]] = load i64*, i64** [[TMP1]], align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i64* [[M]], [[N]]
-; CHECK-NEXT:    [[TMP7_NOT1:%.*]] = icmp eq %C* [[ARG]], null
-; CHECK-NEXT:    [[TMP7_NOT:%.*]] = select i1 [[TMP5]], i1 true, i1 [[TMP7_NOT1]]
-; CHECK-NEXT:    br i1 [[TMP7_NOT]], label [[BB10:%.*]], label [[BB8:%.*]]
+; CHECK-NEXT:    [[M:%.*]] = load ptr, ptr [[ARG:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[C:%.*]], ptr [[ARG]], i64 1, i32 0, i32 0
+; CHECK-NEXT:    [[N:%.*]] = load ptr, ptr [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq ptr [[M]], [[N]]
+; CHECK-NEXT:    br i1 [[TMP5]], label [[BB10:%.*]], label [[BB8:%.*]]
 ; CHECK:       bb:
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb8:
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[C]], %C* [[ARG]], i64 0, i32 0
-; CHECK-NEXT:    tail call void @bar(%struct.S* [[TMP9]])
+; CHECK-NEXT:    tail call void @bar(ptr nonnull [[ARG]])
 ; CHECK-NEXT:    br label [[BB:%.*]]
 ; CHECK:       bb10:
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, i64* [[M]], i64 9
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64* [[TMP2]] to i64 (%C*)**
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64 (%C*)*, i64 (%C*)** [[TMP3]], align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = tail call i64 [[TMP4]](%C* [[ARG]])
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[M]], i64 9
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = tail call i64 [[TMP4]](ptr nonnull [[ARG]])
 ; CHECK-NEXT:    br label [[BB]]
 ;
 entry:
-  %tmp = getelementptr inbounds %C, %C* %arg, i64 0, i32 0, i32 0
-  %m = load i64*, i64** %tmp, align 8
-  %tmp1 = getelementptr inbounds %C, %C* %arg, i64 1, i32 0, i32 0
-  %n = load i64*, i64** %tmp1, align 8
-  %tmp2 = getelementptr inbounds i64, i64* %m, i64 9
-  %tmp3 = bitcast i64* %tmp2 to i64 (%C*)**
-  %tmp4 = load i64 (%C*)*, i64 (%C*)** %tmp3, align 8
-  %tmp5 = icmp eq i64* %m, %n
-  %tmp6 = select i1 %tmp5, %C* null, %C* %arg
-  %tmp7 = icmp ne %C* %tmp6, null
+  %m = load ptr, ptr %arg, align 8
+  %tmp1 = getelementptr inbounds %C, ptr %arg, i64 1, i32 0, i32 0
+  %n = load ptr, ptr %tmp1, align 8
+  %tmp2 = getelementptr inbounds i64, ptr %m, i64 9
+  %tmp4 = load ptr, ptr %tmp2, align 8
+  %tmp5 = icmp eq ptr %m, %n
+  %tmp6 = select i1 %tmp5, ptr null, ptr %arg
+  %tmp7 = icmp ne ptr %tmp6, null
   br i1 %tmp7, label %bb8, label %bb10
 
 bb:                                               ; preds = %bb10, %bb8
   ret void
 
 bb8:                                              ; preds = %entry
-  %tmp9 = getelementptr inbounds %C, %C* %tmp6, i64 0, i32 0
-  tail call void @bar(%struct.S* %tmp9)
+  tail call void @bar(ptr %tmp6)
   br label %bb
 
 bb10:                                             ; preds = %entry
-  %tmp11 = tail call i64 %tmp4(%C* %arg)
+  %tmp11 = tail call i64 %tmp4(ptr %arg)
   br label %bb
 }
 
-define void @test5(%C* %arg, i1 %arg1) {
+define void @test5(ptr %arg, i1 %arg1) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP2_NOT1:%.*]] = icmp eq %C* [[ARG:%.*]], null
+; CHECK-NEXT:    [[TMP2_NOT1:%.*]] = icmp eq ptr [[ARG:%.*]], null
 ; CHECK-NEXT:    [[TMP2_NOT:%.*]] = select i1 [[ARG1:%.*]], i1 true, i1 [[TMP2_NOT1]]
 ; CHECK-NEXT:    br i1 [[TMP2_NOT]], label [[BB5:%.*]], label [[BB3:%.*]]
 ; CHECK:       bb:
 ; CHECK-NEXT:    ret void
 ; CHECK:       bb3:
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[C:%.*]], %C* [[ARG]], i64 0, i32 0
-; CHECK-NEXT:    tail call void @bar(%struct.S* [[TMP4]])
+; CHECK-NEXT:    tail call void @bar(ptr [[ARG]])
 ; CHECK-NEXT:    br label [[BB:%.*]]
 ; CHECK:       bb5:
 ; CHECK-NEXT:    tail call void @foobar()
 ; CHECK-NEXT:    br label [[BB]]
 ;
 entry:
-  %tmp = select i1 %arg1, %C* null, %C* %arg
-  %tmp2 = icmp ne %C* %tmp, null
+  %tmp = select i1 %arg1, ptr null, ptr %arg
+  %tmp2 = icmp ne ptr %tmp, null
   br i1 %tmp2, label %bb3, label %bb5
 
 bb:                                               ; preds = %bb5, %bb3
   ret void
 
 bb3:                                              ; preds = %entry
-  %tmp4 = getelementptr inbounds %C, %C* %tmp, i64 0, i32 0
-  tail call void @bar(%struct.S* %tmp4)
+  tail call void @bar(ptr %tmp)
   br label %bb
 
 bb5:                                              ; preds = %entry

diff  --git a/llvm/test/Transforms/InstCombine/unpack-fca.ll b/llvm/test/Transforms/InstCombine/unpack-fca.ll
index b950445bda91..4ac78b9e32ce 100644
--- a/llvm/test/Transforms/InstCombine/unpack-fca.ll
+++ b/llvm/test/Transforms/InstCombine/unpack-fca.ll
@@ -4,214 +4,200 @@
 target datalayout = "e-i64:64-f80:128-n8:16:32:64"
 target triple = "x86_64-unknown-linux-gnu"
 
-%A__vtbl = type { i8*, i32 (%A*)* }
-%A = type { %A__vtbl* }
-%B = type { i8*, i64 }
+%A__vtbl = type { ptr, ptr }
+%A = type { ptr }
+%B = type { ptr, i64 }
 
- at A__vtblZ = constant %A__vtbl { i8* null, i32 (%A*)* @A.foo }
+ at A__vtblZ = constant %A__vtbl { ptr null, ptr @A.foo }
 
-declare i32 @A.foo(%A* nocapture %this)
+declare i32 @A.foo(ptr nocapture %this)
 
-define void @storeA(%A* %a.ptr) {
+define void @storeA(ptr %a.ptr) {
 ; CHECK-LABEL: @storeA(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [[A:%.*]], %A* [[A_PTR:%.*]], i64 0, i32 0
-; CHECK-NEXT:    store %A__vtbl* @A__vtblZ, %A__vtbl** [[TMP1]], align 8
+; CHECK-NEXT:    store ptr @A__vtblZ, ptr [[A_PTR:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  store %A { %A__vtbl* @A__vtblZ }, %A* %a.ptr, align 8
+  store %A { ptr @A__vtblZ }, ptr %a.ptr, align 8
   ret void
 }
 
-define void @storeB(%B* %b.ptr) {
+define void @storeB(ptr %b.ptr) {
 ; CHECK-LABEL: @storeB(
-; CHECK-NEXT:    [[B_PTR_REPACK:%.*]] = getelementptr inbounds [[B:%.*]], %B* [[B_PTR:%.*]], i64 0, i32 0
-; CHECK-NEXT:    store i8* null, i8** [[B_PTR_REPACK]], align 8
-; CHECK-NEXT:    [[B_PTR_REPACK1:%.*]] = getelementptr inbounds [[B]], %B* [[B_PTR]], i64 0, i32 1
-; CHECK-NEXT:    store i64 42, i64* [[B_PTR_REPACK1]], align 8
+; CHECK-NEXT:    store ptr null, ptr [[B_PTR:%.*]], align 8
+; CHECK-NEXT:    [[B_PTR_REPACK1:%.*]] = getelementptr inbounds [[B:%.*]], ptr [[B_PTR]], i64 0, i32 1
+; CHECK-NEXT:    store i64 42, ptr [[B_PTR_REPACK1]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  store %B { i8* null, i64 42 }, %B* %b.ptr, align 8
+  store %B { ptr null, i64 42 }, ptr %b.ptr, align 8
   ret void
 }
 
-define void @storeStructOfA({ %A }* %sa.ptr) {
+define void @storeStructOfA(ptr %sa.ptr) {
 ; CHECK-LABEL: @storeStructOfA(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr { [[A:%.*]] }, { [[A]] }* [[SA_PTR:%.*]], i64 0, i32 0, i32 0
-; CHECK-NEXT:    store %A__vtbl* @A__vtblZ, %A__vtbl** [[TMP1]], align 8
+; CHECK-NEXT:    store ptr @A__vtblZ, ptr [[SA_PTR:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  store { %A } { %A { %A__vtbl* @A__vtblZ } }, { %A }* %sa.ptr, align 8
+  store { %A } { %A { ptr @A__vtblZ } }, ptr %sa.ptr, align 8
   ret void
 }
 
-define void @storeArrayOfA([1 x %A]* %aa.ptr) {
+define void @storeArrayOfA(ptr %aa.ptr) {
 ; CHECK-LABEL: @storeArrayOfA(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [1 x %A], [1 x %A]* [[AA_PTR:%.*]], i64 0, i64 0, i32 0
-; CHECK-NEXT:    store %A__vtbl* @A__vtblZ, %A__vtbl** [[TMP1]], align 8
+; CHECK-NEXT:    store ptr @A__vtblZ, ptr [[AA_PTR:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  store [1 x %A] [%A { %A__vtbl* @A__vtblZ }], [1 x %A]* %aa.ptr, align 8
+  store [1 x %A] [%A { ptr @A__vtblZ }], ptr %aa.ptr, align 8
   ret void
 }
 
 ; UTC_ARGS: --disable
-define void @storeLargeArrayOfA([2000 x %A]* %aa.ptr) {
+define void @storeLargeArrayOfA(ptr %aa.ptr) {
 ; CHECK-LABEL: @storeLargeArrayOfA(
 ; CHECK-NEXT:    store [2000 x %A]
 ; CHECK-NEXT:    ret void
 ;
-  %i1 = insertvalue [2000 x %A] poison, %A { %A__vtbl* @A__vtblZ }, 1
-  store [2000 x %A] %i1, [2000 x %A]* %aa.ptr, align 8
+  %i1 = insertvalue [2000 x %A] poison, %A { ptr @A__vtblZ }, 1
+  store [2000 x %A] %i1, ptr %aa.ptr, align 8
   ret void
 }
 ; UTC_ARGS: --enable
 
-define void @storeStructOfArrayOfA({ [1 x %A] }* %saa.ptr) {
+define void @storeStructOfArrayOfA(ptr %saa.ptr) {
 ; CHECK-LABEL: @storeStructOfArrayOfA(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr { [1 x %A] }, { [1 x %A] }* [[SAA_PTR:%.*]], i64 0, i32 0, i64 0, i32 0
-; CHECK-NEXT:    store %A__vtbl* @A__vtblZ, %A__vtbl** [[TMP1]], align 8
+; CHECK-NEXT:    store ptr @A__vtblZ, ptr [[SAA_PTR:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  store { [1 x %A] } { [1 x %A] [%A { %A__vtbl* @A__vtblZ }] }, { [1 x %A] }* %saa.ptr, align 8
+  store { [1 x %A] } { [1 x %A] [%A { ptr @A__vtblZ }] }, ptr %saa.ptr, align 8
   ret void
 }
 
-define void @storeArrayOfB([2 x %B]* %ab.ptr, [2 x %B] %ab) {
+define void @storeArrayOfB(ptr %ab.ptr, [2 x %B] %ab) {
 ; CHECK-LABEL: @storeArrayOfB(
 ; CHECK-NEXT:    [[AB_ELT:%.*]] = extractvalue [2 x %B] [[AB:%.*]], 0
-; CHECK-NEXT:    [[AB_PTR_REPACK_REPACK:%.*]] = getelementptr inbounds [2 x %B], [2 x %B]* [[AB_PTR:%.*]], i64 0, i64 0, i32 0
 ; CHECK-NEXT:    [[AB_ELT_ELT:%.*]] = extractvalue [[B:%.*]] [[AB_ELT]], 0
-; CHECK-NEXT:    store i8* [[AB_ELT_ELT]], i8** [[AB_PTR_REPACK_REPACK]], align 8
-; CHECK-NEXT:    [[AB_PTR_REPACK_REPACK3:%.*]] = getelementptr inbounds [2 x %B], [2 x %B]* [[AB_PTR]], i64 0, i64 0, i32 1
+; CHECK-NEXT:    store ptr [[AB_ELT_ELT]], ptr [[AB_PTR:%.*]], align 8
+; CHECK-NEXT:    [[AB_PTR_REPACK3:%.*]] = getelementptr inbounds [[B]], ptr [[AB_PTR]], i64 0, i32 1
 ; CHECK-NEXT:    [[AB_ELT_ELT4:%.*]] = extractvalue [[B]] [[AB_ELT]], 1
-; CHECK-NEXT:    store i64 [[AB_ELT_ELT4]], i64* [[AB_PTR_REPACK_REPACK3]], align 8
+; CHECK-NEXT:    store i64 [[AB_ELT_ELT4]], ptr [[AB_PTR_REPACK3]], align 8
+; CHECK-NEXT:    [[AB_PTR_REPACK1:%.*]] = getelementptr inbounds [2 x %B], ptr [[AB_PTR]], i64 0, i64 1
 ; CHECK-NEXT:    [[AB_ELT2:%.*]] = extractvalue [2 x %B] [[AB]], 1
-; CHECK-NEXT:    [[AB_PTR_REPACK1_REPACK:%.*]] = getelementptr inbounds [2 x %B], [2 x %B]* [[AB_PTR]], i64 0, i64 1, i32 0
 ; CHECK-NEXT:    [[AB_ELT2_ELT:%.*]] = extractvalue [[B]] [[AB_ELT2]], 0
-; CHECK-NEXT:    store i8* [[AB_ELT2_ELT]], i8** [[AB_PTR_REPACK1_REPACK]], align 8
-; CHECK-NEXT:    [[AB_PTR_REPACK1_REPACK5:%.*]] = getelementptr inbounds [2 x %B], [2 x %B]* [[AB_PTR]], i64 0, i64 1, i32 1
+; CHECK-NEXT:    store ptr [[AB_ELT2_ELT]], ptr [[AB_PTR_REPACK1]], align 8
+; CHECK-NEXT:    [[AB_PTR_REPACK1_REPACK5:%.*]] = getelementptr inbounds [2 x %B], ptr [[AB_PTR]], i64 0, i64 1, i32 1
 ; CHECK-NEXT:    [[AB_ELT2_ELT6:%.*]] = extractvalue [[B]] [[AB_ELT2]], 1
-; CHECK-NEXT:    store i64 [[AB_ELT2_ELT6]], i64* [[AB_PTR_REPACK1_REPACK5]], align 8
+; CHECK-NEXT:    store i64 [[AB_ELT2_ELT6]], ptr [[AB_PTR_REPACK1_REPACK5]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  store [2 x %B] %ab, [2 x %B]* %ab.ptr, align 8
+  store [2 x %B] %ab, ptr %ab.ptr, align 8
   ret void
 }
 
-define %A @loadA(%A* %a.ptr) {
+define %A @loadA(ptr %a.ptr) {
 ; CHECK-LABEL: @loadA(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [[A:%.*]], %A* [[A_PTR:%.*]], i64 0, i32 0
-; CHECK-NEXT:    [[DOTUNPACK:%.*]] = load %A__vtbl*, %A__vtbl** [[TMP1]], align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue [[A]] poison, %A__vtbl* [[DOTUNPACK]], 0
-; CHECK-NEXT:    ret [[A]] [[TMP2]]
+; CHECK-NEXT:    [[DOTUNPACK:%.*]] = load ptr, ptr [[A_PTR:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue [[A:%.*]] poison, ptr [[DOTUNPACK]], 0
+; CHECK-NEXT:    ret [[A]] [[TMP1]]
 ;
-  %1 = load %A, %A* %a.ptr, align 8
+  %1 = load %A, ptr %a.ptr, align 8
   ret %A %1
 }
 
-define %B @loadB(%B* %b.ptr) {
+define %B @loadB(ptr %b.ptr) {
 ; CHECK-LABEL: @loadB(
-; CHECK-NEXT:    [[DOTELT:%.*]] = getelementptr inbounds [[B:%.*]], %B* [[B_PTR:%.*]], i64 0, i32 0
-; CHECK-NEXT:    [[DOTUNPACK:%.*]] = load i8*, i8** [[DOTELT]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue [[B]] poison, i8* [[DOTUNPACK]], 0
-; CHECK-NEXT:    [[DOTELT1:%.*]] = getelementptr inbounds [[B]], %B* [[B_PTR]], i64 0, i32 1
-; CHECK-NEXT:    [[DOTUNPACK2:%.*]] = load i64, i64* [[DOTELT1]], align 8
+; CHECK-NEXT:    [[DOTUNPACK:%.*]] = load ptr, ptr [[B_PTR:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue [[B:%.*]] poison, ptr [[DOTUNPACK]], 0
+; CHECK-NEXT:    [[DOTELT1:%.*]] = getelementptr inbounds [[B]], ptr [[B_PTR]], i64 0, i32 1
+; CHECK-NEXT:    [[DOTUNPACK2:%.*]] = load i64, ptr [[DOTELT1]], align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue [[B]] [[TMP1]], i64 [[DOTUNPACK2]], 1
 ; CHECK-NEXT:    ret [[B]] [[TMP2]]
 ;
-  %1 = load %B, %B* %b.ptr, align 8
+  %1 = load %B, ptr %b.ptr, align 8
   ret %B %1
 }
 
-define { %A } @loadStructOfA({ %A }* %sa.ptr) {
+define { %A } @loadStructOfA(ptr %sa.ptr) {
 ; CHECK-LABEL: @loadStructOfA(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr { [[A:%.*]] }, { [[A]] }* [[SA_PTR:%.*]], i64 0, i32 0, i32 0
-; CHECK-NEXT:    [[DOTUNPACK_UNPACK:%.*]] = load %A__vtbl*, %A__vtbl** [[TMP1]], align 8
-; CHECK-NEXT:    [[DOTUNPACK1:%.*]] = insertvalue [[A]] poison, %A__vtbl* [[DOTUNPACK_UNPACK]], 0
-; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue { [[A]] } poison, [[A]] [[DOTUNPACK1]], 0
-; CHECK-NEXT:    ret { [[A]] } [[TMP2]]
+; CHECK-NEXT:    [[DOTUNPACK_UNPACK:%.*]] = load ptr, ptr [[SA_PTR:%.*]], align 8
+; CHECK-NEXT:    [[DOTUNPACK1:%.*]] = insertvalue [[A:%.*]] poison, ptr [[DOTUNPACK_UNPACK]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { [[A]] } poison, [[A]] [[DOTUNPACK1]], 0
+; CHECK-NEXT:    ret { [[A]] } [[TMP1]]
 ;
-  %1 = load { %A }, { %A }* %sa.ptr, align 8
+  %1 = load { %A }, ptr %sa.ptr, align 8
   ret { %A } %1
 }
 
-define [1 x %A] @loadArrayOfA([1 x %A]* %aa.ptr) {
+define [1 x %A] @loadArrayOfA(ptr %aa.ptr) {
 ; CHECK-LABEL: @loadArrayOfA(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [1 x %A], [1 x %A]* [[AA_PTR:%.*]], i64 0, i64 0, i32 0
-; CHECK-NEXT:    [[DOTUNPACK_UNPACK:%.*]] = load %A__vtbl*, %A__vtbl** [[TMP1]], align 8
-; CHECK-NEXT:    [[DOTUNPACK1:%.*]] = insertvalue [[A:%.*]] poison, %A__vtbl* [[DOTUNPACK_UNPACK]], 0
-; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue [1 x %A] poison, [[A]] [[DOTUNPACK1]], 0
-; CHECK-NEXT:    ret [1 x %A] [[TMP2]]
+; CHECK-NEXT:    [[DOTUNPACK_UNPACK:%.*]] = load ptr, ptr [[AA_PTR:%.*]], align 8
+; CHECK-NEXT:    [[DOTUNPACK1:%.*]] = insertvalue [[A:%.*]] poison, ptr [[DOTUNPACK_UNPACK]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue [1 x %A] poison, [[A]] [[DOTUNPACK1]], 0
+; CHECK-NEXT:    ret [1 x %A] [[TMP1]]
 ;
-  %1 = load [1 x %A], [1 x %A]* %aa.ptr, align 8
+  %1 = load [1 x %A], ptr %aa.ptr, align 8
   ret [1 x %A] %1
 }
 
-define { [1 x %A] } @loadStructOfArrayOfA({ [1 x %A] }* %saa.ptr) {
+define { [1 x %A] } @loadStructOfArrayOfA(ptr %saa.ptr) {
 ; CHECK-LABEL: @loadStructOfArrayOfA(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr { [1 x %A] }, { [1 x %A] }* [[SAA_PTR:%.*]], i64 0, i32 0, i64 0, i32 0
-; CHECK-NEXT:    [[DOTUNPACK_UNPACK_UNPACK:%.*]] = load %A__vtbl*, %A__vtbl** [[TMP1]], align 8
-; CHECK-NEXT:    [[DOTUNPACK_UNPACK2:%.*]] = insertvalue [[A:%.*]] poison, %A__vtbl* [[DOTUNPACK_UNPACK_UNPACK]], 0
+; CHECK-NEXT:    [[DOTUNPACK_UNPACK_UNPACK:%.*]] = load ptr, ptr [[SAA_PTR:%.*]], align 8
+; CHECK-NEXT:    [[DOTUNPACK_UNPACK2:%.*]] = insertvalue [[A:%.*]] poison, ptr [[DOTUNPACK_UNPACK_UNPACK]], 0
 ; CHECK-NEXT:    [[DOTUNPACK1:%.*]] = insertvalue [1 x %A] poison, [[A]] [[DOTUNPACK_UNPACK2]], 0
-; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue { [1 x %A] } poison, [1 x %A] [[DOTUNPACK1]], 0
-; CHECK-NEXT:    ret { [1 x %A] } [[TMP2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { [1 x %A] } poison, [1 x %A] [[DOTUNPACK1]], 0
+; CHECK-NEXT:    ret { [1 x %A] } [[TMP1]]
 ;
-  %1 = load { [1 x %A] }, { [1 x %A] }* %saa.ptr, align 8
+  %1 = load { [1 x %A] }, ptr %saa.ptr, align 8
   ret { [1 x %A] } %1
 }
 
-define { %A } @structOfA({ %A }* %sa.ptr) {
+define { %A } @structOfA(ptr %sa.ptr) {
 ; CHECK-LABEL: @structOfA(
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr { [[A:%.*]] }, { [[A]] }* [[SA_PTR:%.*]], i64 0, i32 0, i32 0
-; CHECK-NEXT:    store %A__vtbl* @A__vtblZ, %A__vtbl** [[TMP1]], align 8
-; CHECK-NEXT:    ret { [[A]] } { [[A]] { %A__vtbl* @A__vtblZ } }
+; CHECK-NEXT:    store ptr @A__vtblZ, ptr [[SA_PTR:%.*]], align 8
+; CHECK-NEXT:    ret { [[A:%.*]] } { [[A]] { ptr @A__vtblZ } }
 ;
-  store { %A } { %A { %A__vtbl* @A__vtblZ } }, { %A }* %sa.ptr, align 8
-  %1 = load { %A }, { %A }* %sa.ptr, align 8
+  store { %A } { %A { ptr @A__vtblZ } }, ptr %sa.ptr, align 8
+  %1 = load { %A }, ptr %sa.ptr, align 8
   ret { %A } %1
 }
 
-define %B @structB(%B* %b.ptr) {
+define %B @structB(ptr %b.ptr) {
 ; CHECK-LABEL: @structB(
-; CHECK-NEXT:    [[B_PTR_REPACK:%.*]] = getelementptr inbounds [[B:%.*]], %B* [[B_PTR:%.*]], i64 0, i32 0
-; CHECK-NEXT:    store i8* null, i8** [[B_PTR_REPACK]], align 8
-; CHECK-NEXT:    [[B_PTR_REPACK1:%.*]] = getelementptr inbounds [[B]], %B* [[B_PTR]], i64 0, i32 1
-; CHECK-NEXT:    store i64 42, i64* [[B_PTR_REPACK1]], align 8
-; CHECK-NEXT:    ret [[B]] { i8* null, i64 42 }
+; CHECK-NEXT:    store ptr null, ptr [[B_PTR:%.*]], align 8
+; CHECK-NEXT:    [[B_PTR_REPACK1:%.*]] = getelementptr inbounds [[B:%.*]], ptr [[B_PTR]], i64 0, i32 1
+; CHECK-NEXT:    store i64 42, ptr [[B_PTR_REPACK1]], align 8
+; CHECK-NEXT:    ret [[B]] { ptr null, i64 42 }
 ;
-  store %B { i8* null, i64 42 }, %B* %b.ptr, align 8
-  %1 = load %B, %B* %b.ptr, align 8
+  store %B { ptr null, i64 42 }, ptr %b.ptr, align 8
+  %1 = load %B, ptr %b.ptr, align 8
   ret %B %1
 }
 
-define [2 x %B] @loadArrayOfB([2 x %B]* %ab.ptr) {
+define [2 x %B] @loadArrayOfB(ptr %ab.ptr) {
 ; CHECK-LABEL: @loadArrayOfB(
-; CHECK-NEXT:    [[DOTUNPACK_ELT:%.*]] = getelementptr inbounds [2 x %B], [2 x %B]* [[AB_PTR:%.*]], i64 0, i64 0, i32 0
-; CHECK-NEXT:    [[DOTUNPACK_UNPACK:%.*]] = load i8*, i8** [[DOTUNPACK_ELT]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue [[B:%.*]] poison, i8* [[DOTUNPACK_UNPACK]], 0
-; CHECK-NEXT:    [[DOTUNPACK_ELT3:%.*]] = getelementptr inbounds [2 x %B], [2 x %B]* [[AB_PTR]], i64 0, i64 0, i32 1
-; CHECK-NEXT:    [[DOTUNPACK_UNPACK4:%.*]] = load i64, i64* [[DOTUNPACK_ELT3]], align 8
+; CHECK-NEXT:    [[DOTUNPACK_UNPACK:%.*]] = load ptr, ptr [[AB_PTR:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue [[B:%.*]] poison, ptr [[DOTUNPACK_UNPACK]], 0
+; CHECK-NEXT:    [[DOTUNPACK_ELT3:%.*]] = getelementptr inbounds [[B]], ptr [[AB_PTR]], i64 0, i32 1
+; CHECK-NEXT:    [[DOTUNPACK_UNPACK4:%.*]] = load i64, ptr [[DOTUNPACK_ELT3]], align 8
 ; CHECK-NEXT:    [[DOTUNPACK5:%.*]] = insertvalue [[B]] [[TMP1]], i64 [[DOTUNPACK_UNPACK4]], 1
 ; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue [2 x %B] poison, [[B]] [[DOTUNPACK5]], 0
-; CHECK-NEXT:    [[DOTUNPACK2_ELT:%.*]] = getelementptr inbounds [2 x %B], [2 x %B]* [[AB_PTR]], i64 0, i64 1, i32 0
-; CHECK-NEXT:    [[DOTUNPACK2_UNPACK:%.*]] = load i8*, i8** [[DOTUNPACK2_ELT]], align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = insertvalue [[B]] poison, i8* [[DOTUNPACK2_UNPACK]], 0
-; CHECK-NEXT:    [[DOTUNPACK2_ELT6:%.*]] = getelementptr inbounds [2 x %B], [2 x %B]* [[AB_PTR]], i64 0, i64 1, i32 1
-; CHECK-NEXT:    [[DOTUNPACK2_UNPACK7:%.*]] = load i64, i64* [[DOTUNPACK2_ELT6]], align 8
+; CHECK-NEXT:    [[DOTELT1:%.*]] = getelementptr inbounds [2 x %B], ptr [[AB_PTR]], i64 0, i64 1
+; CHECK-NEXT:    [[DOTUNPACK2_UNPACK:%.*]] = load ptr, ptr [[DOTELT1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = insertvalue [[B]] poison, ptr [[DOTUNPACK2_UNPACK]], 0
+; CHECK-NEXT:    [[DOTUNPACK2_ELT6:%.*]] = getelementptr inbounds [2 x %B], ptr [[AB_PTR]], i64 0, i64 1, i32 1
+; CHECK-NEXT:    [[DOTUNPACK2_UNPACK7:%.*]] = load i64, ptr [[DOTUNPACK2_ELT6]], align 8
 ; CHECK-NEXT:    [[DOTUNPACK28:%.*]] = insertvalue [[B]] [[TMP3]], i64 [[DOTUNPACK2_UNPACK7]], 1
 ; CHECK-NEXT:    [[TMP4:%.*]] = insertvalue [2 x %B] [[TMP2]], [[B]] [[DOTUNPACK28]], 1
 ; CHECK-NEXT:    ret [2 x %B] [[TMP4]]
 ;
-  %1 = load [2 x %B], [2 x %B]* %ab.ptr, align 8
+  %1 = load [2 x %B], ptr %ab.ptr, align 8
   ret [2 x %B] %1
 }
 
-define [2000 x %B] @loadLargeArrayOfB([2000 x %B]* %ab.ptr) {
+define [2000 x %B] @loadLargeArrayOfB(ptr %ab.ptr) {
 ; CHECK-LABEL: @loadLargeArrayOfB(
-; CHECK-NEXT:    [[TMP1:%.*]] = load [2000 x %B], [2000 x %B]* [[AB_PTR:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load [2000 x %B], ptr [[AB_PTR:%.*]], align 8
 ; CHECK-NEXT:    ret [2000 x %B] [[TMP1]]
 ;
-  %1 = load [2000 x %B], [2000 x %B]* %ab.ptr, align 8
+  %1 = load [2000 x %B], ptr %ab.ptr, align 8
   ret [2000 x %B] %1
 }
 
@@ -219,61 +205,59 @@ define [2000 x %B] @loadLargeArrayOfB([2000 x %B]* %ab.ptr) {
 %struct.T = type { i32, i32 }
 
 ; Make sure that we do not increase alignment of packed struct element
-define i32 @packed_alignment(%struct.S* dereferenceable(9) %s) {
+define i32 @packed_alignment(ptr dereferenceable(9) %s) {
 ; CHECK-LABEL: @packed_alignment(
-; CHECK-NEXT:    [[TV_ELT1:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[S:%.*]], i64 0, i32 1, i32 1
-; CHECK-NEXT:    [[TV_UNPACK2:%.*]] = load i32, i32* [[TV_ELT1]], align 1
+; CHECK-NEXT:    [[TV_ELT1:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[S:%.*]], i64 0, i32 1, i32 1
+; CHECK-NEXT:    [[TV_UNPACK2:%.*]] = load i32, ptr [[TV_ELT1]], align 1
 ; CHECK-NEXT:    ret i32 [[TV_UNPACK2]]
 ;
-  %t = getelementptr inbounds %struct.S, %struct.S* %s, i32 0, i32 1
-  %tv = load %struct.T, %struct.T* %t, align 1
+  %t = getelementptr inbounds %struct.S, ptr %s, i32 0, i32 1
+  %tv = load %struct.T, ptr %t, align 1
   %v = extractvalue %struct.T %tv, 1
   ret i32 %v
 }
 
 %struct.U = type {i8, i8, i8, i8, i8, i8, i8, i8, i64}
 
-define void @check_alignment(%struct.U* %u, %struct.U* %v) {
+define void @check_alignment(ptr %u, ptr %v) {
 ; CHECK-LABEL: @check_alignment(
-; CHECK-NEXT:    [[DOTELT:%.*]] = getelementptr inbounds [[STRUCT_U:%.*]], %struct.U* [[U:%.*]], i64 0, i32 0
-; CHECK-NEXT:    [[DOTUNPACK:%.*]] = load i8, i8* [[DOTELT]], align 8
-; CHECK-NEXT:    [[DOTELT1:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[U]], i64 0, i32 1
-; CHECK-NEXT:    [[DOTUNPACK2:%.*]] = load i8, i8* [[DOTELT1]], align 1
-; CHECK-NEXT:    [[DOTELT3:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[U]], i64 0, i32 2
-; CHECK-NEXT:    [[DOTUNPACK4:%.*]] = load i8, i8* [[DOTELT3]], align 2
-; CHECK-NEXT:    [[DOTELT5:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[U]], i64 0, i32 3
-; CHECK-NEXT:    [[DOTUNPACK6:%.*]] = load i8, i8* [[DOTELT5]], align 1
-; CHECK-NEXT:    [[DOTELT7:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[U]], i64 0, i32 4
-; CHECK-NEXT:    [[DOTUNPACK8:%.*]] = load i8, i8* [[DOTELT7]], align 4
-; CHECK-NEXT:    [[DOTELT9:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[U]], i64 0, i32 5
-; CHECK-NEXT:    [[DOTUNPACK10:%.*]] = load i8, i8* [[DOTELT9]], align 1
-; CHECK-NEXT:    [[DOTELT11:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[U]], i64 0, i32 6
-; CHECK-NEXT:    [[DOTUNPACK12:%.*]] = load i8, i8* [[DOTELT11]], align 2
-; CHECK-NEXT:    [[DOTELT13:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[U]], i64 0, i32 7
-; CHECK-NEXT:    [[DOTUNPACK14:%.*]] = load i8, i8* [[DOTELT13]], align 1
-; CHECK-NEXT:    [[DOTELT15:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[U]], i64 0, i32 8
-; CHECK-NEXT:    [[DOTUNPACK16:%.*]] = load i64, i64* [[DOTELT15]], align 8
-; CHECK-NEXT:    [[V_REPACK:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[V:%.*]], i64 0, i32 0
-; CHECK-NEXT:    store i8 [[DOTUNPACK]], i8* [[V_REPACK]], align 8
-; CHECK-NEXT:    [[V_REPACK18:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[V]], i64 0, i32 1
-; CHECK-NEXT:    store i8 [[DOTUNPACK2]], i8* [[V_REPACK18]], align 1
-; CHECK-NEXT:    [[V_REPACK20:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[V]], i64 0, i32 2
-; CHECK-NEXT:    store i8 [[DOTUNPACK4]], i8* [[V_REPACK20]], align 2
-; CHECK-NEXT:    [[V_REPACK22:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[V]], i64 0, i32 3
-; CHECK-NEXT:    store i8 [[DOTUNPACK6]], i8* [[V_REPACK22]], align 1
-; CHECK-NEXT:    [[V_REPACK24:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[V]], i64 0, i32 4
-; CHECK-NEXT:    store i8 [[DOTUNPACK8]], i8* [[V_REPACK24]], align 4
-; CHECK-NEXT:    [[V_REPACK26:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[V]], i64 0, i32 5
-; CHECK-NEXT:    store i8 [[DOTUNPACK10]], i8* [[V_REPACK26]], align 1
-; CHECK-NEXT:    [[V_REPACK28:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[V]], i64 0, i32 6
-; CHECK-NEXT:    store i8 [[DOTUNPACK12]], i8* [[V_REPACK28]], align 2
-; CHECK-NEXT:    [[V_REPACK30:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[V]], i64 0, i32 7
-; CHECK-NEXT:    store i8 [[DOTUNPACK14]], i8* [[V_REPACK30]], align 1
-; CHECK-NEXT:    [[V_REPACK32:%.*]] = getelementptr inbounds [[STRUCT_U]], %struct.U* [[V]], i64 0, i32 8
-; CHECK-NEXT:    store i64 [[DOTUNPACK16]], i64* [[V_REPACK32]], align 8
+; CHECK-NEXT:    [[DOTUNPACK:%.*]] = load i8, ptr [[U:%.*]], align 8
+; CHECK-NEXT:    [[DOTELT1:%.*]] = getelementptr inbounds [[STRUCT_U:%.*]], ptr [[U]], i64 0, i32 1
+; CHECK-NEXT:    [[DOTUNPACK2:%.*]] = load i8, ptr [[DOTELT1]], align 1
+; CHECK-NEXT:    [[DOTELT3:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[U]], i64 0, i32 2
+; CHECK-NEXT:    [[DOTUNPACK4:%.*]] = load i8, ptr [[DOTELT3]], align 2
+; CHECK-NEXT:    [[DOTELT5:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[U]], i64 0, i32 3
+; CHECK-NEXT:    [[DOTUNPACK6:%.*]] = load i8, ptr [[DOTELT5]], align 1
+; CHECK-NEXT:    [[DOTELT7:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[U]], i64 0, i32 4
+; CHECK-NEXT:    [[DOTUNPACK8:%.*]] = load i8, ptr [[DOTELT7]], align 4
+; CHECK-NEXT:    [[DOTELT9:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[U]], i64 0, i32 5
+; CHECK-NEXT:    [[DOTUNPACK10:%.*]] = load i8, ptr [[DOTELT9]], align 1
+; CHECK-NEXT:    [[DOTELT11:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[U]], i64 0, i32 6
+; CHECK-NEXT:    [[DOTUNPACK12:%.*]] = load i8, ptr [[DOTELT11]], align 2
+; CHECK-NEXT:    [[DOTELT13:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[U]], i64 0, i32 7
+; CHECK-NEXT:    [[DOTUNPACK14:%.*]] = load i8, ptr [[DOTELT13]], align 1
+; CHECK-NEXT:    [[DOTELT15:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[U]], i64 0, i32 8
+; CHECK-NEXT:    [[DOTUNPACK16:%.*]] = load i64, ptr [[DOTELT15]], align 8
+; CHECK-NEXT:    store i8 [[DOTUNPACK]], ptr [[V:%.*]], align 8
+; CHECK-NEXT:    [[V_REPACK17:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[V]], i64 0, i32 1
+; CHECK-NEXT:    store i8 [[DOTUNPACK2]], ptr [[V_REPACK17]], align 1
+; CHECK-NEXT:    [[V_REPACK19:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[V]], i64 0, i32 2
+; CHECK-NEXT:    store i8 [[DOTUNPACK4]], ptr [[V_REPACK19]], align 2
+; CHECK-NEXT:    [[V_REPACK21:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[V]], i64 0, i32 3
+; CHECK-NEXT:    store i8 [[DOTUNPACK6]], ptr [[V_REPACK21]], align 1
+; CHECK-NEXT:    [[V_REPACK23:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[V]], i64 0, i32 4
+; CHECK-NEXT:    store i8 [[DOTUNPACK8]], ptr [[V_REPACK23]], align 4
+; CHECK-NEXT:    [[V_REPACK25:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[V]], i64 0, i32 5
+; CHECK-NEXT:    store i8 [[DOTUNPACK10]], ptr [[V_REPACK25]], align 1
+; CHECK-NEXT:    [[V_REPACK27:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[V]], i64 0, i32 6
+; CHECK-NEXT:    store i8 [[DOTUNPACK12]], ptr [[V_REPACK27]], align 2
+; CHECK-NEXT:    [[V_REPACK29:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[V]], i64 0, i32 7
+; CHECK-NEXT:    store i8 [[DOTUNPACK14]], ptr [[V_REPACK29]], align 1
+; CHECK-NEXT:    [[V_REPACK31:%.*]] = getelementptr inbounds [[STRUCT_U]], ptr [[V]], i64 0, i32 8
+; CHECK-NEXT:    store i64 [[DOTUNPACK16]], ptr [[V_REPACK31]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  %1 = load %struct.U, %struct.U* %u
-  store %struct.U %1, %struct.U* %v
+  %1 = load %struct.U, ptr %u
+  store %struct.U %1, ptr %v
   ret void
 }


        


More information about the llvm-commits mailing list