[llvm] 6e83c0a - [X86] Convert tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 5 03:44:19 PST 2024


Author: Nikita Popov
Date: 2024-02-05T12:43:44+01:00
New Revision: 6e83c0a1cbfdb0c0f13c282312c47c7945970f55

URL: https://github.com/llvm/llvm-project/commit/6e83c0a1cbfdb0c0f13c282312c47c7945970f55
DIFF: https://github.com/llvm/llvm-project/commit/6e83c0a1cbfdb0c0f13c282312c47c7945970f55.diff

LOG: [X86] Convert tests to opaque pointers (NFC)
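
The conversion is mechanical: every typed pointer type (i8*, i32*,
%struct.A*, <4 x float>*, ...) becomes the single opaque type ptr, and
overloaded intrinsics drop the pointee types from their mangling, e.g.
llvm.memcpy.p0i8.p0i8.i64 becomes llvm.memcpy.p0.p0.i64. A minimal
before/after sketch (a hypothetical test function, not from this
commit):

  %struct.S = type { i32, i32 }

  ; Before (typed pointers):
  define i32 @load_first(%struct.S* %p) {
    %gep = getelementptr inbounds %struct.S, %struct.S* %p, i64 0, i32 0
    %v = load i32, i32* %gep
    ret i32 %v
  }

  ; After (opaque pointers) - loads, stores and GEPs already carry the
  ; value type explicitly, so no information is lost:
  define i32 @load_first(ptr %p) {
    %gep = getelementptr inbounds %struct.S, ptr %p, i64 0, i32 0
    %v = load i32, ptr %gep
    ret i32 %v
  }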

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/AMX/amx-combine.ll
    llvm/test/CodeGen/X86/AMX/amx-tile-complex-internals.ll
    llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
    llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
    llvm/test/CodeGen/X86/PR37310.mir
    llvm/test/CodeGen/X86/atomic-dagsched.ll
    llvm/test/CodeGen/X86/atomic-nocx16.ll
    llvm/test/CodeGen/X86/avoid-sfb-g-no-change.mir
    llvm/test/CodeGen/X86/avoid-sfb-g-no-change2.mir
    llvm/test/CodeGen/X86/avoid-sfb-g-no-change3.mir
    llvm/test/CodeGen/X86/avoid-sfb-kill-flags.mir
    llvm/test/CodeGen/X86/avoid-sfb-offset.mir
    llvm/test/CodeGen/X86/avx512f-256-set0.mir
    llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll
    llvm/test/CodeGen/X86/basic-block-labels-mir-parse.mir
    llvm/test/CodeGen/X86/basic-block-sections-module1.ll
    llvm/test/CodeGen/X86/basic-block-sections-module2.ll
    llvm/test/CodeGen/X86/block-placement.ll
    llvm/test/CodeGen/X86/callbr-asm-sink.ll
    llvm/test/CodeGen/X86/cmp.ll
    llvm/test/CodeGen/X86/code-model-kernel.ll
    llvm/test/CodeGen/X86/code_placement.ll
    llvm/test/CodeGen/X86/complex-asm.ll
    llvm/test/CodeGen/X86/crash.ll
    llvm/test/CodeGen/X86/fastisel-memset-flush.ll
    llvm/test/CodeGen/X86/function-alias.ll
    llvm/test/CodeGen/X86/funnel-shift.ll
    llvm/test/CodeGen/X86/large-constants-x32.ll
    llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
    llvm/test/CodeGen/X86/madd.ll
    llvm/test/CodeGen/X86/memcpy-scoped-aa.ll
    llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll
    llvm/test/CodeGen/X86/min-legal-vector-width.ll
    llvm/test/CodeGen/X86/post-ra-sched-with-debug.mir
    llvm/test/CodeGen/X86/pr44140.ll
    llvm/test/CodeGen/X86/pr48064.mir
    llvm/test/CodeGen/X86/pre-coalesce-2.ll
    llvm/test/CodeGen/X86/sad.ll
    llvm/test/CodeGen/X86/select-neg.ll
    llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
    llvm/test/CodeGen/X86/stack-protector-dbginfo.ll
    llvm/test/CodeGen/X86/statepoint-cmp-sunk-past-statepoint.ll
    llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
    llvm/test/CodeGen/X86/tailcc-dwarf.ll
    llvm/test/CodeGen/X86/threadlocal_address.ll
    llvm/test/CodeGen/X86/win64-byval.ll
    llvm/test/CodeGen/X86/windows-seh-EHa-CppCatchDotDotDot.ll
    llvm/test/CodeGen/X86/windows-seh-EHa-CppCondiTemps.ll
    llvm/test/CodeGen/X86/windows-seh-EHa-CppDtors01.ll
    llvm/test/CodeGen/X86/windows-seh-EHa-TryInFinally.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/AMX/amx-combine.ll b/llvm/test/CodeGen/X86/AMX/amx-combine.ll
index fe21d64eb7a3a..07f489c633c55 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-combine.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-combine.ll
@@ -133,7 +133,7 @@ entry:
   ret void
 }
 
-define void @combine_v256i8amcast_with_store(i8* %src_ptr, <256 x i8>* %dst_ptr) {
+define void @combine_v256i8amcast_with_store(ptr %src_ptr, ptr %dst_ptr) {
 ; CHECK-LABEL: @combine_v256i8amcast_with_store(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TILE:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 32, ptr [[SRC_PTR:%.*]], i64 64)
@@ -141,13 +141,13 @@ define void @combine_v256i8amcast_with_store(i8* %src_ptr, <256 x i8>* %dst_ptr)
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %tile = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 32, i8* %src_ptr, i64 64)
+  %tile = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 32, ptr %src_ptr, i64 64)
   %vec = call <256 x i8> @llvm.x86.cast.tile.to.vector.v256i8(x86_amx %tile)
-  store <256 x i8> %vec, <256 x i8>* %dst_ptr, align 256
+  store <256 x i8> %vec, ptr %dst_ptr, align 256
   ret void
 }
 
-define void @combine_v256i8amcast_with_load(i8* %src_ptr, <256 x i8>* %dst_ptr) {
+define void @combine_v256i8amcast_with_load(ptr %src_ptr, ptr %dst_ptr) {
 ; CHECK-LABEL: @combine_v256i8amcast_with_load(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 32, ptr [[SRC_PTR:%.*]], i64 32)
@@ -157,7 +157,7 @@ define void @combine_v256i8amcast_with_load(i8* %src_ptr, <256 x i8>* %dst_ptr)
 entry:
   %vec = load <256 x i8>, ptr %src_ptr, align 256
   %tile = call x86_amx @llvm.x86.cast.vector.to.tile.v256i8(<256 x i8> %vec)
-  call void @llvm.x86.tilestored64.internal(i16 8, i16 32, <256 x i8>* %dst_ptr, i64 32, x86_amx %tile)
+  call void @llvm.x86.tilestored64.internal(i16 8, i16 32, ptr %dst_ptr, i64 32, x86_amx %tile)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/X86/AMX/amx-tile-complex-internals.ll b/llvm/test/CodeGen/X86/AMX/amx-tile-complex-internals.ll
index 924572a5bfbdd..ac731b48f6712 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-tile-complex-internals.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-tile-complex-internals.ll
@@ -3,7 +3,7 @@
 ; RUN: -mattr=+amx-complex \
 ; RUN: -verify-machineinstrs | FileCheck %s
 
-define void @test_amx(i8* %pointer, i8* %base, i64 %stride) {
+define void @test_amx(ptr %pointer, ptr %base, i64 %stride) {
 ; CHECK-LABEL: test_amx:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
@@ -27,21 +27,21 @@ define void @test_amx(i8* %pointer, i8* %base, i64 %stride) {
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
 
-  %a = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 8, i8* %base, i64 %stride)
+  %a = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 8, ptr %base, i64 %stride)
   %b = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)
   %c = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)
 
   %c1 = call x86_amx @llvm.x86.tcmmimfp16ps.internal(i16 8, i16 8, i16 8, x86_amx %c, x86_amx %a, x86_amx %b)
   %c2 = call x86_amx @llvm.x86.tcmmrlfp16ps.internal(i16 8, i16 8, i16 8, x86_amx %c1, x86_amx %a, x86_amx %b)
 
-  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %pointer, i64 %stride, x86_amx %c2)
+  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, ptr %pointer, i64 %stride, x86_amx %c2)
   ret void
 }
 
 declare x86_amx @llvm.x86.tilezero.internal(i16, i16)
-declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
-declare x86_amx @llvm.x86.tileloaddt164.internal(i16, i16, i8*, i64)
-declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)
+declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
+declare x86_amx @llvm.x86.tileloaddt164.internal(i16, i16, ptr, i64)
+declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)
 
 declare x86_amx @llvm.x86.tcmmimfp16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
 declare x86_amx @llvm.x86.tcmmrlfp16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
index 6e03e891f9d77..68d546ab09388 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
@@ -26,17 +26,17 @@ define float @test_return_f1(float %f.coerce) {
 entry:
   %retval = alloca %struct.f1, align 4
   %f = alloca %struct.f1, align 4
-  %coerce.dive = getelementptr inbounds %struct.f1, %struct.f1* %f, i32 0, i32 0
-  store float %f.coerce, float* %coerce.dive, align 4
-  %0 = bitcast %struct.f1* %retval to i8*
-  %1 = bitcast %struct.f1* %f to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 4, i1 false)
-  %coerce.dive1 = getelementptr inbounds %struct.f1, %struct.f1* %retval, i32 0, i32 0
-  %2 = load float, float* %coerce.dive1, align 4
+  %coerce.dive = getelementptr inbounds %struct.f1, ptr %f, i32 0, i32 0
+  store float %f.coerce, ptr %coerce.dive, align 4
+  %0 = bitcast ptr %retval to ptr
+  %1 = bitcast ptr %f to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 4, i1 false)
+  %coerce.dive1 = getelementptr inbounds %struct.f1, ptr %retval, i32 0, i32 0
+  %2 = load float, ptr %coerce.dive1, align 4
   ret float %2
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
 
 define double @test_return_d1(double %d.coerce) {
   ; ALL-LABEL: name: test_return_d1
@@ -55,13 +55,13 @@ define double @test_return_d1(double %d.coerce) {
 entry:
   %retval = alloca %struct.d1, align 8
   %d = alloca %struct.d1, align 8
-  %coerce.dive = getelementptr inbounds %struct.d1, %struct.d1* %d, i32 0, i32 0
-  store double %d.coerce, double* %coerce.dive, align 8
-  %0 = bitcast %struct.d1* %retval to i8*
-  %1 = bitcast %struct.d1* %d to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %0, i8* align 8 %1, i64 8, i1 false)
-  %coerce.dive1 = getelementptr inbounds %struct.d1, %struct.d1* %retval, i32 0, i32 0
-  %2 = load double, double* %coerce.dive1, align 8
+  %coerce.dive = getelementptr inbounds %struct.d1, ptr %d, i32 0, i32 0
+  store double %d.coerce, ptr %coerce.dive, align 8
+  %0 = bitcast ptr %retval to ptr
+  %1 = bitcast ptr %d to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %0, ptr align 8 %1, i64 8, i1 false)
+  %coerce.dive1 = getelementptr inbounds %struct.d1, ptr %retval, i32 0, i32 0
+  %2 = load double, ptr %coerce.dive1, align 8
   ret double %2
 }
 
@@ -89,16 +89,16 @@ define { double, double } @test_return_d2(double %d.coerce0, double %d.coerce1)
 entry:
   %retval = alloca %struct.d2, align 8
   %d = alloca %struct.d2, align 8
-  %0 = bitcast %struct.d2* %d to { double, double }*
-  %1 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 0
-  store double %d.coerce0, double* %1, align 8
-  %2 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 1
-  store double %d.coerce1, double* %2, align 8
-  %3 = bitcast %struct.d2* %retval to i8*
-  %4 = bitcast %struct.d2* %d to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %3, i8* align 8 %4, i64 16, i1 false)
-  %5 = bitcast %struct.d2* %retval to { double, double }*
-  %6 = load { double, double }, { double, double }* %5, align 8
+  %0 = bitcast ptr %d to ptr
+  %1 = getelementptr inbounds { double, double }, ptr %0, i32 0, i32 0
+  store double %d.coerce0, ptr %1, align 8
+  %2 = getelementptr inbounds { double, double }, ptr %0, i32 0, i32 1
+  store double %d.coerce1, ptr %2, align 8
+  %3 = bitcast ptr %retval to ptr
+  %4 = bitcast ptr %d to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %3, ptr align 8 %4, i64 16, i1 false)
+  %5 = bitcast ptr %retval to ptr
+  %6 = load { double, double }, ptr %5, align 8
   ret { double, double } %6
 }
 
@@ -119,13 +119,13 @@ define i32 @test_return_i1(i32 %i.coerce) {
 entry:
   %retval = alloca %struct.i1, align 4
   %i = alloca %struct.i1, align 4
-  %coerce.dive = getelementptr inbounds %struct.i1, %struct.i1* %i, i32 0, i32 0
-  store i32 %i.coerce, i32* %coerce.dive, align 4
-  %0 = bitcast %struct.i1* %retval to i8*
-  %1 = bitcast %struct.i1* %i to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 4, i1 false)
-  %coerce.dive1 = getelementptr inbounds %struct.i1, %struct.i1* %retval, i32 0, i32 0
-  %2 = load i32, i32* %coerce.dive1, align 4
+  %coerce.dive = getelementptr inbounds %struct.i1, ptr %i, i32 0, i32 0
+  store i32 %i.coerce, ptr %coerce.dive, align 4
+  %0 = bitcast ptr %retval to ptr
+  %1 = bitcast ptr %i to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 4, i1 false)
+  %coerce.dive1 = getelementptr inbounds %struct.i1, ptr %retval, i32 0, i32 0
+  %2 = load i32, ptr %coerce.dive1, align 4
   ret i32 %2
 }
 
@@ -146,13 +146,13 @@ define i64 @test_return_i2(i64 %i.coerce) {
 entry:
   %retval = alloca %struct.i2, align 4
   %i = alloca %struct.i2, align 4
-  %0 = bitcast %struct.i2* %i to i64*
-  store i64 %i.coerce, i64* %0, align 4
-  %1 = bitcast %struct.i2* %retval to i8*
-  %2 = bitcast %struct.i2* %i to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %2, i64 8, i1 false)
-  %3 = bitcast %struct.i2* %retval to i64*
-  %4 = load i64, i64* %3, align 4
+  %0 = bitcast ptr %i to ptr
+  store i64 %i.coerce, ptr %0, align 4
+  %1 = bitcast ptr %retval to ptr
+  %2 = bitcast ptr %i to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %1, ptr align 4 %2, i64 8, i1 false)
+  %3 = bitcast ptr %retval to ptr
+  %4 = load i64, ptr %3, align 4
   ret i64 %4
 }
 
@@ -186,20 +186,20 @@ entry:
   %i = alloca %struct.i3, align 4
   %coerce = alloca { i64, i32 }, align 4
   %tmp = alloca { i64, i32 }, align 8
-  %0 = getelementptr inbounds { i64, i32 }, { i64, i32 }* %coerce, i32 0, i32 0
-  store i64 %i.coerce0, i64* %0, align 4
-  %1 = getelementptr inbounds { i64, i32 }, { i64, i32 }* %coerce, i32 0, i32 1
-  store i32 %i.coerce1, i32* %1, align 4
-  %2 = bitcast %struct.i3* %i to i8*
-  %3 = bitcast { i64, i32 }* %coerce to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %2, i8* align 4 %3, i64 12, i1 false)
-  %4 = bitcast %struct.i3* %retval to i8*
-  %5 = bitcast %struct.i3* %i to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 %5, i64 12, i1 false)
-  %6 = bitcast { i64, i32 }* %tmp to i8*
-  %7 = bitcast %struct.i3* %retval to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %6, i8* align 4 %7, i64 12, i1 false)
-  %8 = load { i64, i32 }, { i64, i32 }* %tmp, align 8
+  %0 = getelementptr inbounds { i64, i32 }, ptr %coerce, i32 0, i32 0
+  store i64 %i.coerce0, ptr %0, align 4
+  %1 = getelementptr inbounds { i64, i32 }, ptr %coerce, i32 0, i32 1
+  store i32 %i.coerce1, ptr %1, align 4
+  %2 = bitcast ptr %i to ptr
+  %3 = bitcast ptr %coerce to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %2, ptr align 4 %3, i64 12, i1 false)
+  %4 = bitcast ptr %retval to ptr
+  %5 = bitcast ptr %i to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %4, ptr align 4 %5, i64 12, i1 false)
+  %6 = bitcast ptr %tmp to ptr
+  %7 = bitcast ptr %retval to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %6, ptr align 4 %7, i64 12, i1 false)
+  %8 = load { i64, i32 }, ptr %tmp, align 8
   ret { i64, i32 } %8
 }
 
@@ -227,15 +227,15 @@ define { i64, i64 } @test_return_i4(i64 %i.coerce0, i64 %i.coerce1) {
 entry:
   %retval = alloca %struct.i4, align 4
   %i = alloca %struct.i4, align 4
-  %0 = bitcast %struct.i4* %i to { i64, i64 }*
-  %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0
-  store i64 %i.coerce0, i64* %1, align 4
-  %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1
-  store i64 %i.coerce1, i64* %2, align 4
-  %3 = bitcast %struct.i4* %retval to i8*
-  %4 = bitcast %struct.i4* %i to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %3, i8* align 4 %4, i64 16, i1 false)
-  %5 = bitcast %struct.i4* %retval to { i64, i64 }*
-  %6 = load { i64, i64 }, { i64, i64 }* %5, align 4
+  %0 = bitcast ptr %i to ptr
+  %1 = getelementptr inbounds { i64, i64 }, ptr %0, i32 0, i32 0
+  store i64 %i.coerce0, ptr %1, align 4
+  %2 = getelementptr inbounds { i64, i64 }, ptr %0, i32 0, i32 1
+  store i64 %i.coerce1, ptr %2, align 4
+  %3 = bitcast ptr %retval to ptr
+  %4 = bitcast ptr %i to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %3, ptr align 4 %4, i64 16, i1 false)
+  %5 = bitcast ptr %retval to ptr
+  %6 = load { i64, i64 }, ptr %5, align 4
   ret { i64, i64 } %6
 }

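(A note on the bitcasts in the hunks above: with opaque pointers, a
pointer-to-pointer bitcast such as

  %0 = bitcast ptr %retval to ptr

is an identity operation. The mechanical conversion appears to keep
these self-casts so that the unnamed SSA value numbering (%0, %1, ...)
in the rest of each test stays unchanged.)
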
diff --git a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
index 78a0a849f13ea..fa9ed49da1b7f 100644
--- a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
+++ b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
@@ -9,7 +9,7 @@
 %struct.C = type { i8, i8, i8, i8, i32, i32, i32, i64 }
 
 ; save 1,2,3 ... as one big integer.
-define void @merge_const_store(i32 %count, %struct.A* nocapture %p) nounwind uwtable noinline ssp {
+define void @merge_const_store(i32 %count, ptr nocapture %p) nounwind uwtable noinline ssp {
 ; X86-LABEL: merge_const_store:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -47,25 +47,25 @@ define void @merge_const_store(i32 %count, %struct.A* nocapture %p) nounwind uwt
   br i1 %1, label %.lr.ph, label %._crit_edge
 .lr.ph:
   %i.02 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ]
-  %.01 = phi %struct.A* [ %11, %.lr.ph ], [ %p, %0 ]
-  %2 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
-  store i8 1, i8* %2, align 1
-  %3 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
-  store i8 2, i8* %3, align 1
-  %4 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 2
-  store i8 3, i8* %4, align 1
-  %5 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 3
-  store i8 4, i8* %5, align 1
-  %6 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 4
-  store i8 5, i8* %6, align 1
-  %7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 5
-  store i8 6, i8* %7, align 1
-  %8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 6
-  store i8 7, i8* %8, align 1
-  %9 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 7
-  store i8 8, i8* %9, align 1
+  %.01 = phi ptr [ %11, %.lr.ph ], [ %p, %0 ]
+  %2 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 0
+  store i8 1, ptr %2, align 1
+  %3 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 1
+  store i8 2, ptr %3, align 1
+  %4 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 2
+  store i8 3, ptr %4, align 1
+  %5 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 3
+  store i8 4, ptr %5, align 1
+  %6 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 4
+  store i8 5, ptr %6, align 1
+  %7 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 5
+  store i8 6, ptr %7, align 1
+  %8 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 6
+  store i8 7, ptr %8, align 1
+  %9 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 7
+  store i8 8, ptr %9, align 1
   %10 = add nsw i32 %i.02, 1
-  %11 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 1
+  %11 = getelementptr inbounds %struct.A, ptr %.01, i64 1
   %exitcond = icmp eq i32 %10, %count
   br i1 %exitcond, label %._crit_edge, label %.lr.ph
 ._crit_edge:
@@ -73,7 +73,7 @@ define void @merge_const_store(i32 %count, %struct.A* nocapture %p) nounwind uwt
 }
 
 ; No vectors because we use noimplicitfloat
-define void @merge_const_store_no_vec(i32 %count, %struct.B* nocapture %p) noimplicitfloat{
+define void @merge_const_store_no_vec(i32 %count, ptr nocapture %p) noimplicitfloat{
 ; X86-LABEL: merge_const_store_no_vec:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -118,25 +118,25 @@ define void @merge_const_store_no_vec(i32 %count, %struct.B* nocapture %p) noimp
   br i1 %1, label %.lr.ph, label %._crit_edge
 .lr.ph:
   %i.02 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ]
-  %.01 = phi %struct.B* [ %11, %.lr.ph ], [ %p, %0 ]
-  %2 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 0
-  store i32 0, i32* %2, align 4
-  %3 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
-  store i32 0, i32* %3, align 4
-  %4 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 2
-  store i32 0, i32* %4, align 4
-  %5 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 3
-  store i32 0, i32* %5, align 4
-  %6 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 4
-  store i32 0, i32* %6, align 4
-  %7 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 5
-  store i32 0, i32* %7, align 4
-  %8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 6
-  store i32 0, i32* %8, align 4
-  %9 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 7
-  store i32 0, i32* %9, align 4
+  %.01 = phi ptr [ %11, %.lr.ph ], [ %p, %0 ]
+  %2 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 0
+  store i32 0, ptr %2, align 4
+  %3 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 1
+  store i32 0, ptr %3, align 4
+  %4 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 2
+  store i32 0, ptr %4, align 4
+  %5 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 3
+  store i32 0, ptr %5, align 4
+  %6 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 4
+  store i32 0, ptr %6, align 4
+  %7 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 5
+  store i32 0, ptr %7, align 4
+  %8 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 6
+  store i32 0, ptr %8, align 4
+  %9 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 7
+  store i32 0, ptr %9, align 4
   %10 = add nsw i32 %i.02, 1
-  %11 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 1
+  %11 = getelementptr inbounds %struct.B, ptr %.01, i64 1
   %exitcond = icmp eq i32 %10, %count
   br i1 %exitcond, label %._crit_edge, label %.lr.ph
 ._crit_edge:
@@ -144,7 +144,7 @@ define void @merge_const_store_no_vec(i32 %count, %struct.B* nocapture %p) noimp
 }
 
 ; Move the constants using a single vector store.
-define void @merge_const_store_vec(i32 %count, %struct.B* nocapture %p) nounwind uwtable noinline ssp {
+define void @merge_const_store_vec(i32 %count, ptr nocapture %p) nounwind uwtable noinline ssp {
 ; X86-LABEL: merge_const_store_vec:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -184,25 +184,25 @@ define void @merge_const_store_vec(i32 %count, %struct.B* nocapture %p) nounwind
   br i1 %1, label %.lr.ph, label %._crit_edge
 .lr.ph:
   %i.02 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ]
-  %.01 = phi %struct.B* [ %11, %.lr.ph ], [ %p, %0 ]
-  %2 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 0
-  store i32 0, i32* %2, align 4
-  %3 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
-  store i32 0, i32* %3, align 4
-  %4 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 2
-  store i32 0, i32* %4, align 4
-  %5 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 3
-  store i32 0, i32* %5, align 4
-  %6 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 4
-  store i32 0, i32* %6, align 4
-  %7 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 5
-  store i32 0, i32* %7, align 4
-  %8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 6
-  store i32 0, i32* %8, align 4
-  %9 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 7
-  store i32 0, i32* %9, align 4
+  %.01 = phi ptr [ %11, %.lr.ph ], [ %p, %0 ]
+  %2 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 0
+  store i32 0, ptr %2, align 4
+  %3 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 1
+  store i32 0, ptr %3, align 4
+  %4 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 2
+  store i32 0, ptr %4, align 4
+  %5 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 3
+  store i32 0, ptr %5, align 4
+  %6 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 4
+  store i32 0, ptr %6, align 4
+  %7 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 5
+  store i32 0, ptr %7, align 4
+  %8 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 6
+  store i32 0, ptr %8, align 4
+  %9 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 7
+  store i32 0, ptr %9, align 4
   %10 = add nsw i32 %i.02, 1
-  %11 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 1
+  %11 = getelementptr inbounds %struct.B, ptr %.01, i64 1
   %exitcond = icmp eq i32 %10, %count
   br i1 %exitcond, label %._crit_edge, label %.lr.ph
 ._crit_edge:
@@ -210,7 +210,7 @@ define void @merge_const_store_vec(i32 %count, %struct.B* nocapture %p) nounwind
 }
 
 ; Move the first 4 constants as a single vector. Move the rest as scalars.
-define void @merge_nonconst_store(i32 %count, i8 %zz, %struct.A* nocapture %p) nounwind uwtable noinline ssp {
+define void @merge_nonconst_store(i32 %count, i8 %zz, ptr nocapture %p) nounwind uwtable noinline ssp {
 ; X86-BWON-LABEL: merge_nonconst_store:
 ; X86-BWON:       # %bb.0:
 ; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -273,32 +273,32 @@ define void @merge_nonconst_store(i32 %count, i8 %zz, %struct.A* nocapture %p) n
   br i1 %1, label %.lr.ph, label %._crit_edge
 .lr.ph:
   %i.02 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ]
-  %.01 = phi %struct.A* [ %11, %.lr.ph ], [ %p, %0 ]
-  %2 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
-  store i8 1, i8* %2, align 1
-  %3 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
-  store i8 2, i8* %3, align 1
-  %4 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 2
-  store i8 3, i8* %4, align 1
-  %5 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 3
-  store i8 4, i8* %5, align 1
-  %6 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 4
-  store i8 %zz, i8* %6, align 1                     ;  <----------- Not a const;
-  %7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 5
-  store i8 6, i8* %7, align 1
-  %8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 6
-  store i8 7, i8* %8, align 1
-  %9 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 7
-  store i8 8, i8* %9, align 1
+  %.01 = phi ptr [ %11, %.lr.ph ], [ %p, %0 ]
+  %2 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 0
+  store i8 1, ptr %2, align 1
+  %3 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 1
+  store i8 2, ptr %3, align 1
+  %4 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 2
+  store i8 3, ptr %4, align 1
+  %5 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 3
+  store i8 4, ptr %5, align 1
+  %6 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 4
+  store i8 %zz, ptr %6, align 1                     ;  <----------- Not a const;
+  %7 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 5
+  store i8 6, ptr %7, align 1
+  %8 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 6
+  store i8 7, ptr %8, align 1
+  %9 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 7
+  store i8 8, ptr %9, align 1
   %10 = add nsw i32 %i.02, 1
-  %11 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 1
+  %11 = getelementptr inbounds %struct.A, ptr %.01, i64 1
   %exitcond = icmp eq i32 %10, %count
   br i1 %exitcond, label %._crit_edge, label %.lr.ph
 ._crit_edge:
   ret void
 }
 
-define void @merge_loads_i16(i32 %count, %struct.A* noalias nocapture %q, %struct.A* noalias nocapture %p) nounwind uwtable noinline ssp {
+define void @merge_loads_i16(i32 %count, ptr noalias nocapture %q, ptr noalias nocapture %p) nounwind uwtable noinline ssp {
 ; X86-BWON-LABEL: merge_loads_i16:
 ; X86-BWON:       # %bb.0:
 ; X86-BWON-NEXT:    pushl %esi
@@ -376,21 +376,21 @@ define void @merge_loads_i16(i32 %count, %struct.A* noalias nocapture %q, %struc
   br i1 %1, label %.lr.ph, label %._crit_edge
 
 .lr.ph:                                           ; preds = %0
-  %2 = getelementptr inbounds %struct.A, %struct.A* %q, i64 0, i32 0
-  %3 = getelementptr inbounds %struct.A, %struct.A* %q, i64 0, i32 1
+  %2 = getelementptr inbounds %struct.A, ptr %q, i64 0, i32 0
+  %3 = getelementptr inbounds %struct.A, ptr %q, i64 0, i32 1
   br label %4
 
 ; <label>:4                                       ; preds = %4, %.lr.ph
   %i.02 = phi i32 [ 0, %.lr.ph ], [ %9, %4 ]
-  %.01 = phi %struct.A* [ %p, %.lr.ph ], [ %10, %4 ]
-  %5 = load i8, i8* %2, align 1
-  %6 = load i8, i8* %3, align 1
-  %7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
-  store i8 %5, i8* %7, align 1
-  %8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
-  store i8 %6, i8* %8, align 1
+  %.01 = phi ptr [ %p, %.lr.ph ], [ %10, %4 ]
+  %5 = load i8, ptr %2, align 1
+  %6 = load i8, ptr %3, align 1
+  %7 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 0
+  store i8 %5, ptr %7, align 1
+  %8 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 1
+  store i8 %6, ptr %8, align 1
   %9 = add nsw i32 %i.02, 1
-  %10 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 1
+  %10 = getelementptr inbounds %struct.A, ptr %.01, i64 1
   %exitcond = icmp eq i32 %9, %count
   br i1 %exitcond, label %._crit_edge, label %4
 
@@ -399,7 +399,7 @@ define void @merge_loads_i16(i32 %count, %struct.A* noalias nocapture %q, %struc
 }
 
 ; The loads and the stores are interleaved. Can't merge them.
-define void @no_merge_loads(i32 %count, %struct.A* noalias nocapture %q, %struct.A* noalias nocapture %p) nounwind uwtable noinline ssp {
+define void @no_merge_loads(i32 %count, ptr noalias nocapture %q, ptr noalias nocapture %p) nounwind uwtable noinline ssp {
 ; X86-BWON-LABEL: no_merge_loads:
 ; X86-BWON:       # %bb.0:
 ; X86-BWON-NEXT:    pushl %ebx
@@ -489,21 +489,21 @@ define void @no_merge_loads(i32 %count, %struct.A* noalias nocapture %q, %struct
   br i1 %1, label %.lr.ph, label %._crit_edge
 
 .lr.ph:                                           ; preds = %0
-  %2 = getelementptr inbounds %struct.A, %struct.A* %q, i64 0, i32 0
-  %3 = getelementptr inbounds %struct.A, %struct.A* %q, i64 0, i32 1
+  %2 = getelementptr inbounds %struct.A, ptr %q, i64 0, i32 0
+  %3 = getelementptr inbounds %struct.A, ptr %q, i64 0, i32 1
   br label %a4
 
 a4:                                       ; preds = %4, %.lr.ph
   %i.02 = phi i32 [ 0, %.lr.ph ], [ %a9, %a4 ]
-  %.01 = phi %struct.A* [ %p, %.lr.ph ], [ %a10, %a4 ]
-  %a5 = load i8, i8* %2, align 1
-  %a7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
-  store i8 %a5, i8* %a7, align 1
-  %a8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
-  %a6 = load i8, i8* %3, align 1
-  store i8 %a6, i8* %a8, align 1
+  %.01 = phi ptr [ %p, %.lr.ph ], [ %a10, %a4 ]
+  %a5 = load i8, ptr %2, align 1
+  %a7 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 0
+  store i8 %a5, ptr %a7, align 1
+  %a8 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 1
+  %a6 = load i8, ptr %3, align 1
+  store i8 %a6, ptr %a8, align 1
   %a9 = add nsw i32 %i.02, 1
-  %a10 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 1
+  %a10 = getelementptr inbounds %struct.A, ptr %.01, i64 1
   %exitcond = icmp eq i32 %a9, %count
   br i1 %exitcond, label %._crit_edge, label %a4
 
@@ -511,7 +511,7 @@ a4:                                       ; preds = %4, %.lr.ph
   ret void
 }
 
-define void @merge_loads_integer(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp {
+define void @merge_loads_integer(i32 %count, ptr noalias nocapture %q, ptr noalias nocapture %p) nounwind uwtable noinline ssp {
 ; X86-LABEL: merge_loads_integer:
 ; X86:       # %bb.0:
 ; X86-NEXT:    pushl %edi
@@ -559,21 +559,21 @@ define void @merge_loads_integer(i32 %count, %struct.B* noalias nocapture %q, %s
   br i1 %1, label %.lr.ph, label %._crit_edge
 
 .lr.ph:                                           ; preds = %0
-  %2 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 0
-  %3 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 1
+  %2 = getelementptr inbounds %struct.B, ptr %q, i64 0, i32 0
+  %3 = getelementptr inbounds %struct.B, ptr %q, i64 0, i32 1
   br label %4
 
 ; <label>:4                                       ; preds = %4, %.lr.ph
   %i.02 = phi i32 [ 0, %.lr.ph ], [ %9, %4 ]
-  %.01 = phi %struct.B* [ %p, %.lr.ph ], [ %10, %4 ]
-  %5 = load i32, i32* %2
-  %6 = load i32, i32* %3
-  %7 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 0
-  store i32 %5, i32* %7
-  %8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
-  store i32 %6, i32* %8
+  %.01 = phi ptr [ %p, %.lr.ph ], [ %10, %4 ]
+  %5 = load i32, ptr %2
+  %6 = load i32, ptr %3
+  %7 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 0
+  store i32 %5, ptr %7
+  %8 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 1
+  store i32 %6, ptr %8
   %9 = add nsw i32 %i.02, 1
-  %10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 1
+  %10 = getelementptr inbounds %struct.B, ptr %.01, i64 1
   %exitcond = icmp eq i32 %9, %count
   br i1 %exitcond, label %._crit_edge, label %4
 
@@ -581,7 +581,7 @@ define void @merge_loads_integer(i32 %count, %struct.B* noalias nocapture %q, %s
   ret void
 }
 
-define void @merge_loads_vector(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp {
+define void @merge_loads_vector(i32 %count, ptr noalias nocapture %q, ptr noalias nocapture %p) nounwind uwtable noinline ssp {
 ; X86-LABEL: merge_loads_vector:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -619,29 +619,29 @@ define void @merge_loads_vector(i32 %count, %struct.B* noalias nocapture %q, %st
   br i1 %a1, label %.lr.ph, label %._crit_edge
 
 .lr.ph:                                           ; preds = %0
-  %a2 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 0
-  %a3 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 1
-  %a4 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 2
-  %a5 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 3
+  %a2 = getelementptr inbounds %struct.B, ptr %q, i64 0, i32 0
+  %a3 = getelementptr inbounds %struct.B, ptr %q, i64 0, i32 1
+  %a4 = getelementptr inbounds %struct.B, ptr %q, i64 0, i32 2
+  %a5 = getelementptr inbounds %struct.B, ptr %q, i64 0, i32 3
   br label %block4
 
 block4:                                       ; preds = %4, %.lr.ph
   %i.02 = phi i32 [ 0, %.lr.ph ], [ %c9, %block4 ]
-  %.01 = phi %struct.B* [ %p, %.lr.ph ], [ %c10, %block4 ]
-  %a7 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 0
-  %a8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
-  %a9 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 2
-  %a10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 3
-  %b1 = load i32, i32* %a2
-  %b2 = load i32, i32* %a3
-  %b3 = load i32, i32* %a4
-  %b4 = load i32, i32* %a5
-  store i32 %b1, i32* %a7
-  store i32 %b2, i32* %a8
-  store i32 %b3, i32* %a9
-  store i32 %b4, i32* %a10
+  %.01 = phi ptr [ %p, %.lr.ph ], [ %c10, %block4 ]
+  %a7 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 0
+  %a8 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 1
+  %a9 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 2
+  %a10 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 3
+  %b1 = load i32, ptr %a2
+  %b2 = load i32, ptr %a3
+  %b3 = load i32, ptr %a4
+  %b4 = load i32, ptr %a5
+  store i32 %b1, ptr %a7
+  store i32 %b2, ptr %a8
+  store i32 %b3, ptr %a9
+  store i32 %b4, ptr %a10
   %c9 = add nsw i32 %i.02, 1
-  %c10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 1
+  %c10 = getelementptr inbounds %struct.B, ptr %.01, i64 1
   %exitcond = icmp eq i32 %c9, %count
   br i1 %exitcond, label %._crit_edge, label %block4
 
@@ -650,7 +650,7 @@ block4:                                       ; preds = %4, %.lr.ph
 }
 
 ; On x86, even unaligned copies can be merged to vector ops.
-define void @merge_loads_no_align(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp {
+define void @merge_loads_no_align(i32 %count, ptr noalias nocapture %q, ptr noalias nocapture %p) nounwind uwtable noinline ssp {
 ; X86-LABEL: merge_loads_no_align:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -688,29 +688,29 @@ define void @merge_loads_no_align(i32 %count, %struct.B* noalias nocapture %q, %
   br i1 %a1, label %.lr.ph, label %._crit_edge
 
 .lr.ph:                                           ; preds = %0
-  %a2 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 0
-  %a3 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 1
-  %a4 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 2
-  %a5 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 3
+  %a2 = getelementptr inbounds %struct.B, ptr %q, i64 0, i32 0
+  %a3 = getelementptr inbounds %struct.B, ptr %q, i64 0, i32 1
+  %a4 = getelementptr inbounds %struct.B, ptr %q, i64 0, i32 2
+  %a5 = getelementptr inbounds %struct.B, ptr %q, i64 0, i32 3
   br label %block4
 
 block4:                                       ; preds = %4, %.lr.ph
   %i.02 = phi i32 [ 0, %.lr.ph ], [ %c9, %block4 ]
-  %.01 = phi %struct.B* [ %p, %.lr.ph ], [ %c10, %block4 ]
-  %a7 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 0
-  %a8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
-  %a9 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 2
-  %a10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 3
-  %b1 = load i32, i32* %a2, align 1
-  %b2 = load i32, i32* %a3, align 1
-  %b3 = load i32, i32* %a4, align 1
-  %b4 = load i32, i32* %a5, align 1
-  store i32 %b1, i32* %a7, align 1
-  store i32 %b2, i32* %a8, align 1
-  store i32 %b3, i32* %a9, align 1
-  store i32 %b4, i32* %a10, align 1
+  %.01 = phi ptr [ %p, %.lr.ph ], [ %c10, %block4 ]
+  %a7 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 0
+  %a8 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 1
+  %a9 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 2
+  %a10 = getelementptr inbounds %struct.B, ptr %.01, i64 0, i32 3
+  %b1 = load i32, ptr %a2, align 1
+  %b2 = load i32, ptr %a3, align 1
+  %b3 = load i32, ptr %a4, align 1
+  %b4 = load i32, ptr %a5, align 1
+  store i32 %b1, ptr %a7, align 1
+  store i32 %b2, ptr %a8, align 1
+  store i32 %b3, ptr %a9, align 1
+  store i32 %b4, ptr %a10, align 1
   %c9 = add nsw i32 %i.02, 1
-  %c10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 1
+  %c10 = getelementptr inbounds %struct.B, ptr %.01, i64 1
   %exitcond = icmp eq i32 %c9, %count
   br i1 %exitcond, label %._crit_edge, label %block4
 
@@ -720,7 +720,7 @@ block4:                                       ; preds = %4, %.lr.ph
 
 ; Make sure that we merge the consecutive load/store sequence below and use a
 ; word (16 bit) instead of a byte copy.
-define void @MergeLoadStoreBaseIndexOffset(i64* %a, i8* %b, i8* %c, i32 %n) {
+define void @MergeLoadStoreBaseIndexOffset(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X86-BWON-LABEL: MergeLoadStoreBaseIndexOffset:
 ; X86-BWON:       # %bb.0:
 ; X86-BWON-NEXT:    pushl %ebx
@@ -820,19 +820,19 @@ define void @MergeLoadStoreBaseIndexOffset(i64* %a, i8* %b, i8* %c, i32 %n) {
 
 ; <label>:1
   %.09 = phi i32 [ %n, %0 ], [ %11, %1 ]
-  %.08 = phi i8* [ %b, %0 ], [ %10, %1 ]
-  %.0 = phi i64* [ %a, %0 ], [ %2, %1 ]
-  %2 = getelementptr inbounds i64, i64* %.0, i64 1
-  %3 = load i64, i64* %.0, align 1
-  %4 = getelementptr inbounds i8, i8* %c, i64 %3
-  %5 = load i8, i8* %4, align 1
+  %.08 = phi ptr [ %b, %0 ], [ %10, %1 ]
+  %.0 = phi ptr [ %a, %0 ], [ %2, %1 ]
+  %2 = getelementptr inbounds i64, ptr %.0, i64 1
+  %3 = load i64, ptr %.0, align 1
+  %4 = getelementptr inbounds i8, ptr %c, i64 %3
+  %5 = load i8, ptr %4, align 1
   %6 = add i64 %3, 1
-  %7 = getelementptr inbounds i8, i8* %c, i64 %6
-  %8 = load i8, i8* %7, align 1
-  store i8 %5, i8* %.08, align 1
-  %9 = getelementptr inbounds i8, i8* %.08, i64 1
-  store i8 %8, i8* %9, align 1
-  %10 = getelementptr inbounds i8, i8* %.08, i64 2
+  %7 = getelementptr inbounds i8, ptr %c, i64 %6
+  %8 = load i8, ptr %7, align 1
+  store i8 %5, ptr %.08, align 1
+  %9 = getelementptr inbounds i8, ptr %.08, i64 1
+  store i8 %8, ptr %9, align 1
+  %10 = getelementptr inbounds i8, ptr %.08, i64 2
   %11 = add nsw i32 %.09, -1
   %12 = icmp eq i32 %11, 0
   br i1 %12, label %13, label %1
@@ -843,7 +843,7 @@ define void @MergeLoadStoreBaseIndexOffset(i64* %a, i8* %b, i8* %c, i32 %n) {
 
 ; Make sure that we merge the consecutive load/store sequence below and use a
 ; word (16 bit) instead of a byte copy for complicated address calculation.
-define void @MergeLoadStoreBaseIndexOffsetComplicated(i8* %a, i8* %b, i8* %c, i64 %n) {
+define void @MergeLoadStoreBaseIndexOffsetComplicated(ptr %a, ptr %b, ptr %c, i64 %n) {
 ; X86-BWON-LABEL: MergeLoadStoreBaseIndexOffsetComplicated:
 ; X86-BWON:       # %bb.0:
 ; X86-BWON-NEXT:    pushl %ebp
@@ -969,20 +969,20 @@ define void @MergeLoadStoreBaseIndexOffsetComplicated(i8* %a, i8* %b, i8* %c, i6
 
 ; <label>:1
   %.09 = phi i64 [ 0, %0 ], [ %13, %1 ]
-  %.08 = phi i8* [ %b, %0 ], [ %12, %1 ]
-  %2 = load i8, i8* %.08, align 1
+  %.08 = phi ptr [ %b, %0 ], [ %12, %1 ]
+  %2 = load i8, ptr %.08, align 1
   %3 = sext i8 %2 to i64
-  %4 = getelementptr inbounds i8, i8* %c, i64 %3
-  %5 = load i8, i8* %4, align 1
+  %4 = getelementptr inbounds i8, ptr %c, i64 %3
+  %5 = load i8, ptr %4, align 1
   %6 = add nsw i64 %3, 1
-  %7 = getelementptr inbounds i8, i8* %c, i64 %6
-  %8 = load i8, i8* %7, align 1
-  %9 = getelementptr inbounds i8, i8* %a, i64 %.09
-  store i8 %5, i8* %9, align 1
+  %7 = getelementptr inbounds i8, ptr %c, i64 %6
+  %8 = load i8, ptr %7, align 1
+  %9 = getelementptr inbounds i8, ptr %a, i64 %.09
+  store i8 %5, ptr %9, align 1
   %10 = or disjoint i64 %.09, 1
-  %11 = getelementptr inbounds i8, i8* %a, i64 %10
-  store i8 %8, i8* %11, align 1
-  %12 = getelementptr inbounds i8, i8* %.08, i64 1
+  %11 = getelementptr inbounds i8, ptr %a, i64 %10
+  store i8 %8, ptr %11, align 1
+  %12 = getelementptr inbounds i8, ptr %.08, i64 1
   %13 = add nuw nsw i64 %.09, 2
   %14 = icmp slt i64 %13, %n
   br i1 %14, label %1, label %15
@@ -994,7 +994,7 @@ define void @MergeLoadStoreBaseIndexOffsetComplicated(i8* %a, i8* %b, i8* %c, i6
 ; Make sure that we merge the consecutive load/store sequence below and use a
 ; word (16 bit) instead of a byte copy even if there are intermediate sign
 ; extensions.
-define void @MergeLoadStoreBaseIndexOffsetSext(i8* %a, i8* %b, i8* %c, i32 %n) {
+define void @MergeLoadStoreBaseIndexOffsetSext(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X86-BWON-LABEL: MergeLoadStoreBaseIndexOffsetSext:
 ; X86-BWON:       # %bb.0:
 ; X86-BWON-NEXT:    pushl %ebx
@@ -1094,20 +1094,20 @@ define void @MergeLoadStoreBaseIndexOffsetSext(i8* %a, i8* %b, i8* %c, i32 %n) {
 
 ; <label>:1
   %.09 = phi i32 [ %n, %0 ], [ %12, %1 ]
-  %.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
-  %.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
-  %2 = getelementptr inbounds i8, i8* %.0, i64 1
-  %3 = load i8, i8* %.0, align 1
+  %.08 = phi ptr [ %b, %0 ], [ %11, %1 ]
+  %.0 = phi ptr [ %a, %0 ], [ %2, %1 ]
+  %2 = getelementptr inbounds i8, ptr %.0, i64 1
+  %3 = load i8, ptr %.0, align 1
   %4 = sext i8 %3 to i64
-  %5 = getelementptr inbounds i8, i8* %c, i64 %4
-  %6 = load i8, i8* %5, align 1
+  %5 = getelementptr inbounds i8, ptr %c, i64 %4
+  %6 = load i8, ptr %5, align 1
   %7 = add i64 %4, 1
-  %8 = getelementptr inbounds i8, i8* %c, i64 %7
-  %9 = load i8, i8* %8, align 1
-  store i8 %6, i8* %.08, align 1
-  %10 = getelementptr inbounds i8, i8* %.08, i64 1
-  store i8 %9, i8* %10, align 1
-  %11 = getelementptr inbounds i8, i8* %.08, i64 2
+  %8 = getelementptr inbounds i8, ptr %c, i64 %7
+  %9 = load i8, ptr %8, align 1
+  store i8 %6, ptr %.08, align 1
+  %10 = getelementptr inbounds i8, ptr %.08, i64 1
+  store i8 %9, ptr %10, align 1
+  %11 = getelementptr inbounds i8, ptr %.08, i64 2
   %12 = add nsw i32 %.09, -1
   %13 = icmp eq i32 %12, 0
   br i1 %13, label %14, label %1
@@ -1118,7 +1118,7 @@ define void @MergeLoadStoreBaseIndexOffsetSext(i8* %a, i8* %b, i8* %c, i32 %n) {
 
 ; However, we can only ignore sign extensions when merging if they are present on
 ; all memory computations;
-define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
+define void @loadStoreBaseIndexOffsetSextNoSex(ptr %a, ptr %b, ptr %c, i32 %n) {
 ; X86-BWON-LABEL: loadStoreBaseIndexOffsetSextNoSex:
 ; X86-BWON:       # %bb.0:
 ; X86-BWON-NEXT:    pushl %ebp
@@ -1244,21 +1244,21 @@ define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
 
 ; <label>:1
   %.09 = phi i32 [ %n, %0 ], [ %12, %1 ]
-  %.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
-  %.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
-  %2 = getelementptr inbounds i8, i8* %.0, i64 1
-  %3 = load i8, i8* %.0, align 1
+  %.08 = phi ptr [ %b, %0 ], [ %11, %1 ]
+  %.0 = phi ptr [ %a, %0 ], [ %2, %1 ]
+  %2 = getelementptr inbounds i8, ptr %.0, i64 1
+  %3 = load i8, ptr %.0, align 1
   %4 = sext i8 %3 to i64
-  %5 = getelementptr inbounds i8, i8* %c, i64 %4
-  %6 = load i8, i8* %5, align 1
+  %5 = getelementptr inbounds i8, ptr %c, i64 %4
+  %6 = load i8, ptr %5, align 1
   %7 = add i8 %3, 1
   %wrap.4 = sext i8 %7 to i64
-  %8 = getelementptr inbounds i8, i8* %c, i64 %wrap.4
-  %9 = load i8, i8* %8, align 1
-  store i8 %6, i8* %.08, align 1
-  %10 = getelementptr inbounds i8, i8* %.08, i64 1
-  store i8 %9, i8* %10, align 1
-  %11 = getelementptr inbounds i8, i8* %.08, i64 2
+  %8 = getelementptr inbounds i8, ptr %c, i64 %wrap.4
+  %9 = load i8, ptr %8, align 1
+  store i8 %6, ptr %.08, align 1
+  %10 = getelementptr inbounds i8, ptr %.08, i64 1
+  store i8 %9, ptr %10, align 1
+  %11 = getelementptr inbounds i8, ptr %.08, i64 2
   %12 = add nsw i32 %.09, -1
   %13 = icmp eq i32 %12, 0
   br i1 %13, label %14, label %1
@@ -1268,7 +1268,7 @@ define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
 }
 
 ; PR21711 ( http://llvm.org/bugs/show_bug.cgi?id=21711 )
-define void @merge_vec_element_store(<8 x float> %v, float* %ptr) {
+define void @merge_vec_element_store(<8 x float> %v, ptr %ptr) {
 ; X86-LABEL: merge_vec_element_store:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1289,28 +1289,28 @@ define void @merge_vec_element_store(<8 x float> %v, float* %ptr) {
   %vecext5 = extractelement <8 x float> %v, i32 5
   %vecext6 = extractelement <8 x float> %v, i32 6
   %vecext7 = extractelement <8 x float> %v, i32 7
-  %arrayidx1 = getelementptr inbounds float, float* %ptr, i64 1
-  %arrayidx2 = getelementptr inbounds float, float* %ptr, i64 2
-  %arrayidx3 = getelementptr inbounds float, float* %ptr, i64 3
-  %arrayidx4 = getelementptr inbounds float, float* %ptr, i64 4
-  %arrayidx5 = getelementptr inbounds float, float* %ptr, i64 5
-  %arrayidx6 = getelementptr inbounds float, float* %ptr, i64 6
-  %arrayidx7 = getelementptr inbounds float, float* %ptr, i64 7
-  store float %vecext0, float* %ptr, align 4
-  store float %vecext1, float* %arrayidx1, align 4
-  store float %vecext2, float* %arrayidx2, align 4
-  store float %vecext3, float* %arrayidx3, align 4
-  store float %vecext4, float* %arrayidx4, align 4
-  store float %vecext5, float* %arrayidx5, align 4
-  store float %vecext6, float* %arrayidx6, align 4
-  store float %vecext7, float* %arrayidx7, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %ptr, i64 1
+  %arrayidx2 = getelementptr inbounds float, ptr %ptr, i64 2
+  %arrayidx3 = getelementptr inbounds float, ptr %ptr, i64 3
+  %arrayidx4 = getelementptr inbounds float, ptr %ptr, i64 4
+  %arrayidx5 = getelementptr inbounds float, ptr %ptr, i64 5
+  %arrayidx6 = getelementptr inbounds float, ptr %ptr, i64 6
+  %arrayidx7 = getelementptr inbounds float, ptr %ptr, i64 7
+  store float %vecext0, ptr %ptr, align 4
+  store float %vecext1, ptr %arrayidx1, align 4
+  store float %vecext2, ptr %arrayidx2, align 4
+  store float %vecext3, ptr %arrayidx3, align 4
+  store float %vecext4, ptr %arrayidx4, align 4
+  store float %vecext5, ptr %arrayidx5, align 4
+  store float %vecext6, ptr %arrayidx6, align 4
+  store float %vecext7, ptr %arrayidx7, align 4
   ret void
 
 }
 
 ; PR21711 - Merge vector stores into wider vector stores.
 ; These should be merged into 32-byte stores.
-define void @merge_vec_extract_stores(<8 x float> %v1, <8 x float> %v2, <4 x float>* %ptr) {
+define void @merge_vec_extract_stores(<8 x float> %v1, <8 x float> %v2, ptr %ptr) {
 ; X86-LABEL: merge_vec_extract_stores:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1325,24 +1325,24 @@ define void @merge_vec_extract_stores(<8 x float> %v1, <8 x float> %v2, <4 x flo
 ; X64-NEXT:    vmovups %ymm1, 80(%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
-  %idx0 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3
-  %idx1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 4
-  %idx2 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 5
-  %idx3 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 6
+  %idx0 = getelementptr inbounds <4 x float>, ptr %ptr, i64 3
+  %idx1 = getelementptr inbounds <4 x float>, ptr %ptr, i64 4
+  %idx2 = getelementptr inbounds <4 x float>, ptr %ptr, i64 5
+  %idx3 = getelementptr inbounds <4 x float>, ptr %ptr, i64 6
   %shuffle0 = shufflevector <8 x float> %v1, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %shuffle1 = shufflevector <8 x float> %v1, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %shuffle2 = shufflevector <8 x float> %v2, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %shuffle3 = shufflevector <8 x float> %v2, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-  store <4 x float> %shuffle0, <4 x float>* %idx0, align 16
-  store <4 x float> %shuffle1, <4 x float>* %idx1, align 16
-  store <4 x float> %shuffle2, <4 x float>* %idx2, align 16
-  store <4 x float> %shuffle3, <4 x float>* %idx3, align 16
+  store <4 x float> %shuffle0, ptr %idx0, align 16
+  store <4 x float> %shuffle1, ptr %idx1, align 16
+  store <4 x float> %shuffle2, ptr %idx2, align 16
+  store <4 x float> %shuffle3, ptr %idx3, align 16
   ret void
 
 }
 
 ; Merging vector stores when sourced from vector loads.
-define void @merge_vec_stores_from_loads(<4 x float>* %v, <4 x float>* %ptr) {
+define void @merge_vec_stores_from_loads(ptr %v, ptr %ptr) {
 ; X86-LABEL: merge_vec_stores_from_loads:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1358,19 +1358,19 @@ define void @merge_vec_stores_from_loads(<4 x float>* %v, <4 x float>* %ptr) {
 ; X64-NEXT:    vmovups %ymm0, (%rsi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
-  %load_idx0 = getelementptr inbounds <4 x float>, <4 x float>* %v, i64 0
-  %load_idx1 = getelementptr inbounds <4 x float>, <4 x float>* %v, i64 1
-  %v0 = load <4 x float>, <4 x float>* %load_idx0
-  %v1 = load <4 x float>, <4 x float>* %load_idx1
-  %store_idx0 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 0
-  %store_idx1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 1
-  store <4 x float> %v0, <4 x float>* %store_idx0, align 16
-  store <4 x float> %v1, <4 x float>* %store_idx1, align 16
+  %load_idx0 = getelementptr inbounds <4 x float>, ptr %v, i64 0
+  %load_idx1 = getelementptr inbounds <4 x float>, ptr %v, i64 1
+  %v0 = load <4 x float>, ptr %load_idx0
+  %v1 = load <4 x float>, ptr %load_idx1
+  %store_idx0 = getelementptr inbounds <4 x float>, ptr %ptr, i64 0
+  %store_idx1 = getelementptr inbounds <4 x float>, ptr %ptr, i64 1
+  store <4 x float> %v0, ptr %store_idx0, align 16
+  store <4 x float> %v1, ptr %store_idx1, align 16
   ret void
 
 }
 
-define void @merge_vec_stores_of_zero(<4 x i32>* %ptr) {
+define void @merge_vec_stores_of_zero(ptr %ptr) {
 ; X86-LABEL: merge_vec_stores_of_zero:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1385,14 +1385,14 @@ define void @merge_vec_stores_of_zero(<4 x i32>* %ptr) {
 ; X64-NEXT:    vmovups %ymm0, 48(%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
-  %idx0 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 3
-  %idx1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 4
-  store <4 x i32> zeroinitializer, <4 x i32>* %idx0, align 16
-  store <4 x i32> zeroinitializer, <4 x i32>* %idx1, align 16
+  %idx0 = getelementptr inbounds <4 x i32>, ptr %ptr, i64 3
+  %idx1 = getelementptr inbounds <4 x i32>, ptr %ptr, i64 4
+  store <4 x i32> zeroinitializer, ptr %idx0, align 16
+  store <4 x i32> zeroinitializer, ptr %idx1, align 16
   ret void
 }
 
-define void @merge_vec_stores_of_constant_splat(<4 x i32>* %ptr) {
+define void @merge_vec_stores_of_constant_splat(ptr %ptr) {
 ; X86-LABEL: merge_vec_stores_of_constant_splat:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1407,14 +1407,14 @@ define void @merge_vec_stores_of_constant_splat(<4 x i32>* %ptr) {
 ; X64-NEXT:    vmovaps %xmm0, 48(%rdi)
 ; X64-NEXT:    vmovaps %xmm0, 64(%rdi)
 ; X64-NEXT:    retq
-  %idx0 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 3
-  %idx1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 4
-  store <4 x i32> <i32 42, i32 42, i32 42, i32 42>, <4 x i32>* %idx0, align 16
-  store <4 x i32> <i32 42, i32 42, i32 42, i32 42>, <4 x i32>* %idx1, align 16
+  %idx0 = getelementptr inbounds <4 x i32>, ptr %ptr, i64 3
+  %idx1 = getelementptr inbounds <4 x i32>, ptr %ptr, i64 4
+  store <4 x i32> <i32 42, i32 42, i32 42, i32 42>, ptr %idx0, align 16
+  store <4 x i32> <i32 42, i32 42, i32 42, i32 42>, ptr %idx1, align 16
   ret void
 }
 
-define void @merge_vec_stores_of_constants(<4 x i32>* %ptr) {
+define void @merge_vec_stores_of_constants(ptr %ptr) {
 ; X86-LABEL: merge_vec_stores_of_constants:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1431,14 +1431,14 @@ define void @merge_vec_stores_of_constants(<4 x i32>* %ptr) {
 ; X64-NEXT:    vmovaps {{.*#+}} xmm0 = [0,265,26,0]
 ; X64-NEXT:    vmovaps %xmm0, 64(%rdi)
 ; X64-NEXT:    retq
-  %idx0 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 3
-  %idx1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 4
-  store <4 x i32> <i32 25, i32 51, i32 45, i32 0>, <4 x i32>* %idx0, align 16
-  store <4 x i32> <i32 0, i32 265, i32 26, i32 0>, <4 x i32>* %idx1, align 16
+  %idx0 = getelementptr inbounds <4 x i32>, ptr %ptr, i64 3
+  %idx1 = getelementptr inbounds <4 x i32>, ptr %ptr, i64 4
+  store <4 x i32> <i32 25, i32 51, i32 45, i32 0>, ptr %idx0, align 16
+  store <4 x i32> <i32 0, i32 265, i32 26, i32 0>, ptr %idx1, align 16
   ret void
 }
 
-define void @merge_vec_stores_of_constants_with_undefs(<4 x i32>* %ptr) {
+define void @merge_vec_stores_of_constants_with_undefs(ptr %ptr) {
 ; X86-LABEL: merge_vec_stores_of_constants_with_undefs:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1453,16 +1453,16 @@ define void @merge_vec_stores_of_constants_with_undefs(<4 x i32>* %ptr) {
 ; X64-NEXT:    vmovups %ymm0, 48(%rdi)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
-  %idx0 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 3
-  %idx1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 4
-  store <4 x i32> <i32 0, i32 0, i32 0, i32 undef>, <4 x i32>* %idx0, align 16
-  store <4 x i32> <i32 0, i32 undef, i32 0, i32 0>, <4 x i32>* %idx1, align 16
+  %idx0 = getelementptr inbounds <4 x i32>, ptr %ptr, i64 3
+  %idx1 = getelementptr inbounds <4 x i32>, ptr %ptr, i64 4
+  store <4 x i32> <i32 0, i32 0, i32 0, i32 undef>, ptr %idx0, align 16
+  store <4 x i32> <i32 0, i32 undef, i32 0, i32 0>, ptr %idx1, align 16
   ret void
 }
 
 ; This is a minimized test based on real code that was failing.
 ; This should now be merged.
-define void @merge_vec_element_and_scalar_load([6 x i64]* %array) {
+define void @merge_vec_element_and_scalar_load(ptr %array) {
 ; X86-LABEL: merge_vec_element_and_scalar_load:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1479,24 +1479,24 @@ define void @merge_vec_element_and_scalar_load([6 x i64]* %array) {
 ; X64-NEXT:    vmovups (%rdi), %xmm0
 ; X64-NEXT:    vmovups %xmm0, 32(%rdi)
 ; X64-NEXT:    retq
-  %idx0 = getelementptr inbounds [6 x i64], [6 x i64]* %array, i64 0, i64 0
-  %idx1 = getelementptr inbounds [6 x i64], [6 x i64]* %array, i64 0, i64 1
-  %idx4 = getelementptr inbounds [6 x i64], [6 x i64]* %array, i64 0, i64 4
-  %idx5 = getelementptr inbounds [6 x i64], [6 x i64]* %array, i64 0, i64 5
+  %idx0 = getelementptr inbounds [6 x i64], ptr %array, i64 0, i64 0
+  %idx1 = getelementptr inbounds [6 x i64], ptr %array, i64 0, i64 1
+  %idx4 = getelementptr inbounds [6 x i64], ptr %array, i64 0, i64 4
+  %idx5 = getelementptr inbounds [6 x i64], ptr %array, i64 0, i64 5
 
-  %a0 = load i64, i64* %idx0, align 8
-  store i64 %a0, i64* %idx4, align 8
+  %a0 = load i64, ptr %idx0, align 8
+  store i64 %a0, ptr %idx4, align 8
 
-  %b = bitcast i64* %idx1 to <2 x i64>*
-  %v = load <2 x i64>, <2 x i64>* %b, align 8
+  %b = bitcast ptr %idx1 to ptr
+  %v = load <2 x i64>, ptr %b, align 8
   %a1 = extractelement <2 x i64> %v, i32 0
-  store i64 %a1, i64* %idx5, align 8
+  store i64 %a1, ptr %idx5, align 8
   ret void
 
 }
 
 ; Don't let a non-consecutive store thwart merging of the last two.
-define void @almost_consecutive_stores(i8* %p) {
+define void @almost_consecutive_stores(ptr %p) {
 ; X86-LABEL: almost_consecutive_stores:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1511,18 +1511,18 @@ define void @almost_consecutive_stores(i8* %p) {
 ; X64-NEXT:    movb $1, 42(%rdi)
 ; X64-NEXT:    movw $770, 2(%rdi) # imm = 0x302
 ; X64-NEXT:    retq
-  store i8 0, i8* %p
-  %p1 = getelementptr i8, i8* %p, i64 42
-  store i8 1, i8* %p1
-  %p2 = getelementptr i8, i8* %p, i64 2
-  store i8 2, i8* %p2
-  %p3 = getelementptr i8, i8* %p, i64 3
-  store i8 3, i8* %p3
+  store i8 0, ptr %p
+  %p1 = getelementptr i8, ptr %p, i64 42
+  store i8 1, ptr %p1
+  %p2 = getelementptr i8, ptr %p, i64 2
+  store i8 2, ptr %p2
+  %p3 = getelementptr i8, ptr %p, i64 3
+  store i8 3, ptr %p3
   ret void
 }
 
 ; We should be able to merge these.
-define void @merge_bitcast(<4 x i32> %v, float* %ptr) {
+define void @merge_bitcast(<4 x i32> %v, ptr %ptr) {
 ; X86-LABEL: merge_bitcast:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1541,19 +1541,19 @@ define void @merge_bitcast(<4 x i32> %v, float* %ptr) {
   %f1 = bitcast i32 %vecext1 to float
   %f2 = bitcast i32 %vecext2 to float
   %f3 = bitcast i32 %vecext3 to float
-  %idx0 = getelementptr inbounds float, float* %ptr, i64 0
-  %idx1 = getelementptr inbounds float, float* %ptr, i64 1
-  %idx2 = getelementptr inbounds float, float* %ptr, i64 2
-  %idx3 = getelementptr inbounds float, float* %ptr, i64 3
-  store float %f0, float* %idx0, align 4
-  store float %f1, float* %idx1, align 4
-  store float %f2, float* %idx2, align 4
-  store float %f3, float* %idx3, align 4
+  %idx0 = getelementptr inbounds float, ptr %ptr, i64 0
+  %idx1 = getelementptr inbounds float, ptr %ptr, i64 1
+  %idx2 = getelementptr inbounds float, ptr %ptr, i64 2
+  %idx3 = getelementptr inbounds float, ptr %ptr, i64 3
+  store float %f0, ptr %idx0, align 4
+  store float %f1, ptr %idx1, align 4
+  store float %f2, ptr %idx2, align 4
+  store float %f3, ptr %idx3, align 4
   ret void
 }
 
 ; Same as @merge_const_store with heterogeneous types.
-define void @merge_const_store_heterogeneous(i32 %count, %struct.C* nocapture %p) nounwind uwtable noinline ssp {
+define void @merge_const_store_heterogeneous(i32 %count, ptr nocapture %p) nounwind uwtable noinline ssp {
 ; X86-LABEL: merge_const_store_heterogeneous:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1591,19 +1591,19 @@ define void @merge_const_store_heterogeneous(i32 %count, %struct.C* nocapture %p
   br i1 %1, label %.lr.ph, label %._crit_edge
 .lr.ph:
   %i.02 = phi i32 [ %7, %.lr.ph ], [ 0, %0 ]
-  %.01 = phi %struct.C* [ %8, %.lr.ph ], [ %p, %0 ]
-  %2 = getelementptr inbounds %struct.C, %struct.C* %.01, i64 0, i32 0
-  store i8 1, i8* %2, align 1
-  %3 = getelementptr inbounds %struct.C, %struct.C* %.01, i64 0, i32 1
-  store i8 2, i8* %3, align 1
-  %4 = getelementptr inbounds %struct.C, %struct.C* %.01, i64 0, i32 2
-  store i8 3, i8* %4, align 1
-  %5 = getelementptr inbounds %struct.C, %struct.C* %.01, i64 0, i32 3
-  store i8 4, i8* %5, align 1
-  %6 = getelementptr inbounds %struct.C, %struct.C* %.01, i64 0, i32 4
-  store i32 134678021, i32* %6, align 1
+  %.01 = phi ptr [ %8, %.lr.ph ], [ %p, %0 ]
+  %2 = getelementptr inbounds %struct.C, ptr %.01, i64 0, i32 0
+  store i8 1, ptr %2, align 1
+  %3 = getelementptr inbounds %struct.C, ptr %.01, i64 0, i32 1
+  store i8 2, ptr %3, align 1
+  %4 = getelementptr inbounds %struct.C, ptr %.01, i64 0, i32 2
+  store i8 3, ptr %4, align 1
+  %5 = getelementptr inbounds %struct.C, ptr %.01, i64 0, i32 3
+  store i8 4, ptr %5, align 1
+  %6 = getelementptr inbounds %struct.C, ptr %.01, i64 0, i32 4
+  store i32 134678021, ptr %6, align 1
   %7 = add nsw i32 %i.02, 1
-  %8 = getelementptr inbounds %struct.C, %struct.C* %.01, i64 1
+  %8 = getelementptr inbounds %struct.C, ptr %.01, i64 1
   %exitcond = icmp eq i32 %7, %count
   br i1 %exitcond, label %._crit_edge, label %.lr.ph
 ._crit_edge:
@@ -1611,7 +1611,7 @@ define void @merge_const_store_heterogeneous(i32 %count, %struct.C* nocapture %p
 }
 
 ; Merging heterogeneous integer types.
-define void @merge_heterogeneous(%struct.C* nocapture %p, %struct.C* nocapture %q) {
+define void @merge_heterogeneous(ptr nocapture %p, ptr nocapture %q) {
 ; X86-LABEL: merge_heterogeneous:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1627,30 +1627,30 @@ define void @merge_heterogeneous(%struct.C* nocapture %p, %struct.C* nocapture %
 ; X64-NEXT:    movq (%rdi), %rax
 ; X64-NEXT:    movq %rax, (%rsi)
 ; X64-NEXT:    retq
-  %s0 = getelementptr inbounds %struct.C, %struct.C* %p, i64 0, i32 0
-  %s1 = getelementptr inbounds %struct.C, %struct.C* %p, i64 0, i32 1
-  %s2 = getelementptr inbounds %struct.C, %struct.C* %p, i64 0, i32 2
-  %s3 = getelementptr inbounds %struct.C, %struct.C* %p, i64 0, i32 3
-  %s4 = getelementptr inbounds %struct.C, %struct.C* %p, i64 0, i32 4
-  %d0 = getelementptr inbounds %struct.C, %struct.C* %q, i64 0, i32 0
-  %d1 = getelementptr inbounds %struct.C, %struct.C* %q, i64 0, i32 1
-  %d2 = getelementptr inbounds %struct.C, %struct.C* %q, i64 0, i32 2
-  %d3 = getelementptr inbounds %struct.C, %struct.C* %q, i64 0, i32 3
-  %d4 = getelementptr inbounds %struct.C, %struct.C* %q, i64 0, i32 4
-  %v0 = load i8, i8* %s0, align 1
-  %v1 = load i8, i8* %s1, align 1
-  %v2 = load i8, i8* %s2, align 1
-  %v3 = load i8, i8* %s3, align 1
-  %v4 = load i32, i32* %s4, align 1
-  store i8 %v0, i8* %d0, align 1
-  store i8 %v1, i8* %d1, align 1
-  store i8 %v2, i8* %d2, align 1
-  store i8 %v3, i8* %d3, align 1
-  store i32 %v4, i32* %d4, align 4
+  %s0 = getelementptr inbounds %struct.C, ptr %p, i64 0, i32 0
+  %s1 = getelementptr inbounds %struct.C, ptr %p, i64 0, i32 1
+  %s2 = getelementptr inbounds %struct.C, ptr %p, i64 0, i32 2
+  %s3 = getelementptr inbounds %struct.C, ptr %p, i64 0, i32 3
+  %s4 = getelementptr inbounds %struct.C, ptr %p, i64 0, i32 4
+  %d0 = getelementptr inbounds %struct.C, ptr %q, i64 0, i32 0
+  %d1 = getelementptr inbounds %struct.C, ptr %q, i64 0, i32 1
+  %d2 = getelementptr inbounds %struct.C, ptr %q, i64 0, i32 2
+  %d3 = getelementptr inbounds %struct.C, ptr %q, i64 0, i32 3
+  %d4 = getelementptr inbounds %struct.C, ptr %q, i64 0, i32 4
+  %v0 = load i8, ptr %s0, align 1
+  %v1 = load i8, ptr %s1, align 1
+  %v2 = load i8, ptr %s2, align 1
+  %v3 = load i8, ptr %s3, align 1
+  %v4 = load i32, ptr %s4, align 1
+  store i8 %v0, ptr %d0, align 1
+  store i8 %v1, ptr %d1, align 1
+  store i8 %v2, ptr %d2, align 1
+  store i8 %v3, ptr %d3, align 1
+  store i32 %v4, ptr %d4, align 4
   ret void
 }
 
-define i32 @merge_store_load_store_seq(i32* %buff) {
+define i32 @merge_store_load_store_seq(ptr %buff) {
 ; X86-LABEL: merge_store_load_store_seq:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -1666,14 +1666,14 @@ define i32 @merge_store_load_store_seq(i32* %buff) {
 ; X64-NEXT:    retq
 entry:
 
-  store i32 0, i32* %buff, align 4
-  %arrayidx1 = getelementptr inbounds i32, i32* %buff, i64 1
-  %0 = load i32, i32* %arrayidx1, align 4
-  store i32 0, i32* %arrayidx1, align 4
+  store i32 0, ptr %buff, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %buff, i64 1
+  %0 = load i32, ptr %arrayidx1, align 4
+  store i32 0, ptr %arrayidx1, align 4
   ret i32 %0
 }
 
-define i32 @merge_store_alias(i32* %buff, i32* %other) {
+define i32 @merge_store_alias(ptr %buff, ptr %other) {
 ; X86-LABEL: merge_store_alias:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -1691,9 +1691,9 @@ define i32 @merge_store_alias(i32* %buff, i32* %other) {
 ; X64-NEXT:    retq
 entry:
 
-  store i32 0, i32* %buff, align 4
-  %arrayidx1 = getelementptr inbounds i32, i32* %buff, i64 1
-  %0 = load i32, i32* %other, align 4
-  store i32 0, i32* %arrayidx1, align 4
+  store i32 0, ptr %buff, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %buff, i64 1
+  %0 = load i32, ptr %other, align 4
+  store i32 0, ptr %arrayidx1, align 4
   ret i32 %0
 }

diff  --git a/llvm/test/CodeGen/X86/PR37310.mir b/llvm/test/CodeGen/X86/PR37310.mir
index 05e3f2c561f19..fa0368a1b5b3d 100644
--- a/llvm/test/CodeGen/X86/PR37310.mir
+++ b/llvm/test/CodeGen/X86/PR37310.mir
@@ -31,35 +31,35 @@
     br i1 %cmp, label %if.end, label %if.then
   
   if.then:                                          ; preds = %entry
-    %0 = bitcast [128 x i32]* %q to i8*
-    call void @llvm.lifetime.start.p0i8(i64 512, i8* nonnull %0)
-    %arrayidx2 = bitcast [128 x i32]* %q to i32*
-    %call = call i32 @inita(i32* nonnull %arrayidx2)
-    call void @llvm.lifetime.end.p0i8(i64 512, i8* nonnull %0)
+    %0 = bitcast ptr %q to ptr
+    call void @llvm.lifetime.start.p0(i64 512, ptr nonnull %0)
+    %arrayidx2 = bitcast ptr %q to ptr
+    %call = call i32 @inita(ptr nonnull %arrayidx2)
+    call void @llvm.lifetime.end.p0(i64 512, ptr nonnull %0)
     br label %return
 
   unreachable:
     br label %return
   
   if.end:                                           ; preds = %entry
-    %1 = bitcast [128 x i32]* %r to i8*
-    call void @llvm.lifetime.start.p0i8(i64 512, i8* nonnull %1)
-    %arrayidx1 = getelementptr inbounds [128 x i32], [128 x i32]* %r, i64 0, i64 3
-    %call2 = call i32 @inita(i32* nonnull %arrayidx1)
-    call void @llvm.lifetime.end.p0i8(i64 512, i8* nonnull %1)
+    %1 = bitcast ptr %r to ptr
+    call void @llvm.lifetime.start.p0(i64 512, ptr nonnull %1)
+    %arrayidx1 = getelementptr inbounds [128 x i32], ptr %r, i64 0, i64 3
+    %call2 = call i32 @inita(ptr nonnull %arrayidx1)
+    call void @llvm.lifetime.end.p0(i64 512, ptr nonnull %1)
     br label %return
   
   return:                                           ; preds = %if.end, %if.then
     ret void
   }
   
-  declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+  declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
   
-  declare i32 @inita(i32*)
+  declare i32 @inita(ptr)
   
-  declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+  declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
   
-  declare void @llvm.stackprotector(i8*, i8**)
+  declare void @llvm.stackprotector(ptr, ptr)
   
 ...
 ---

diff  --git a/llvm/test/CodeGen/X86/atomic-dagsched.ll b/llvm/test/CodeGen/X86/atomic-dagsched.ll
index 15cf96f473c8b..4cb0611a2295f 100644
--- a/llvm/test/CodeGen/X86/atomic-dagsched.ll
+++ b/llvm/test/CodeGen/X86/atomic-dagsched.ll
@@ -1,25 +1,25 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mcpu=corei7 -verify-machineinstrs | FileCheck %s
 
-define void @test(i8** %a, i64* %b, i64 %c, i64 %d) nounwind {
+define void @test(ptr %a, ptr %b, i64 %c, i64 %d) nounwind {
 entry:
-  %ptrtoarg4 = load i8*, i8** %a, align 8
-  %brglist1 = getelementptr i8*, i8** %a, i64 1
-  %ptrtoarg25 = load i8*, i8** %brglist1, align 8
-  %0 = load i64, i64* %b, align 8
+  %ptrtoarg4 = load ptr, ptr %a, align 8
+  %brglist1 = getelementptr ptr, ptr %a, i64 1
+  %ptrtoarg25 = load ptr, ptr %brglist1, align 8
+  %0 = load i64, ptr %b, align 8
   %1 = mul i64 %0, 4
-  %scevgep = getelementptr i8, i8* %ptrtoarg25, i64 %1
+  %scevgep = getelementptr i8, ptr %ptrtoarg25, i64 %1
   %2 = mul i64 %d, 4
   br label %loop.cond
 
 loop.cond:                                        ; preds = %test.exit, %entry
-  %asr.iv6 = phi i8* [ %29, %test.exit ], [ %scevgep, %entry ]
+  %asr.iv6 = phi ptr [ %29, %test.exit ], [ %scevgep, %entry ]
   %iv = phi i64 [ %0, %entry ], [ %28, %test.exit ]
   %3 = icmp eq i64 %iv, %c
   br i1 %3, label %return, label %loop
 
 loop:                                             ; preds = %loop.cond
-  %4 = load i64*, i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
-  %5 = load i64, i64* %4, align 8
+  %4 = load ptr, ptr addrspace(256) inttoptr (i64 264 to ptr addrspace(256)), align 8
+  %5 = load i64, ptr %4, align 8
   %vector.size.i = ashr i64 %5, 3
   %num.vector.wi.i = shl i64 %vector.size.i, 3
   %6 = icmp eq i64 %vector.size.i, 0
@@ -32,11 +32,11 @@ dim_0_vector_pre_head.i:                          ; preds = %loop
   br label %vector_kernel_entry.i
 
 vector_kernel_entry.i:                            ; preds = %vector_kernel_entry.i, %dim_0_vector_pre_head.i
-  %asr.iv9 = phi i8* [ %scevgep10, %vector_kernel_entry.i ], [ %asr.iv6, %dim_0_vector_pre_head.i ]
+  %asr.iv9 = phi ptr [ %scevgep10, %vector_kernel_entry.i ], [ %asr.iv6, %dim_0_vector_pre_head.i ]
   %asr.iv = phi i64 [ %asr.iv.next, %vector_kernel_entry.i ], [ %vector.size.i, %dim_0_vector_pre_head.i ]
-  %8 = addrspacecast i8* %ptrtoarg4 to i32 addrspace(1)*
-  %asr.iv911 = addrspacecast i8* %asr.iv9 to <8 x i32> addrspace(1)*
-  %9 = load <8 x i32>, <8 x i32> addrspace(1)* %asr.iv911, align 4
+  %8 = addrspacecast ptr %ptrtoarg4 to ptr addrspace(1)
+  %asr.iv911 = addrspacecast ptr %asr.iv9 to ptr addrspace(1)
+  %9 = load <8 x i32>, ptr addrspace(1) %asr.iv911, align 4
   %extract8vector_func.i = extractelement <8 x i32> %9, i32 0
   %extract9vector_func.i = extractelement <8 x i32> %9, i32 1
   %extract10vector_func.i = extractelement <8 x i32> %9, i32 2
@@ -45,17 +45,17 @@ vector_kernel_entry.i:                            ; preds = %vector_kernel_entry
   %extract13vector_func.i = extractelement <8 x i32> %9, i32 5
   %extract14vector_func.i = extractelement <8 x i32> %9, i32 6
   %extract15vector_func.i = extractelement <8 x i32> %9, i32 7
-  %10 = atomicrmw min i32 addrspace(1)* %8, i32 %extract8vector_func.i seq_cst
-  %11 = atomicrmw min i32 addrspace(1)* %8, i32 %extract9vector_func.i seq_cst
-  %12 = atomicrmw min i32 addrspace(1)* %8, i32 %extract10vector_func.i seq_cst
-  %13 = atomicrmw min i32 addrspace(1)* %8, i32 %extract11vector_func.i seq_cst
-  %14 = atomicrmw min i32 addrspace(1)* %8, i32 %extract12vector_func.i seq_cst
-  %15 = atomicrmw min i32 addrspace(1)* %8, i32 %extract13vector_func.i seq_cst
-  %16 = atomicrmw min i32 addrspace(1)* %8, i32 %extract14vector_func.i seq_cst
-  %17 = atomicrmw min i32 addrspace(1)* %8, i32 %extract15vector_func.i seq_cst
-  store <8 x i32> %vectorvector_func.i, <8 x i32> addrspace(1)* %asr.iv911, align 4
+  %10 = atomicrmw min ptr addrspace(1) %8, i32 %extract8vector_func.i seq_cst
+  %11 = atomicrmw min ptr addrspace(1) %8, i32 %extract9vector_func.i seq_cst
+  %12 = atomicrmw min ptr addrspace(1) %8, i32 %extract10vector_func.i seq_cst
+  %13 = atomicrmw min ptr addrspace(1) %8, i32 %extract11vector_func.i seq_cst
+  %14 = atomicrmw min ptr addrspace(1) %8, i32 %extract12vector_func.i seq_cst
+  %15 = atomicrmw min ptr addrspace(1) %8, i32 %extract13vector_func.i seq_cst
+  %16 = atomicrmw min ptr addrspace(1) %8, i32 %extract14vector_func.i seq_cst
+  %17 = atomicrmw min ptr addrspace(1) %8, i32 %extract15vector_func.i seq_cst
+  store <8 x i32> %vectorvector_func.i, ptr addrspace(1) %asr.iv911, align 4
   %asr.iv.next = add i64 %asr.iv, -1
-  %scevgep10 = getelementptr i8, i8* %asr.iv9, i64 32
+  %scevgep10 = getelementptr i8, ptr %asr.iv9, i64 32
   %dim_0_vector_cmp.to.max.i = icmp eq i64 %asr.iv.next, 0
   br i1 %dim_0_vector_cmp.to.max.i, label %scalarIf.i, label %vector_kernel_entry.i
 
@@ -65,35 +65,35 @@ scalarIf.i:                                       ; preds = %vector_kernel_entry
   br i1 %18, label %test.exit, label %dim_0_pre_head.i
 
 dim_0_pre_head.i:                                 ; preds = %scalarIf.i
-  %19 = load i64*, i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
-  %20 = load i64, i64* %19, align 8
+  %19 = load ptr, ptr addrspace(256) inttoptr (i64 264 to ptr addrspace(256)), align 8
+  %20 = load i64, ptr %19, align 8
   %21 = trunc i64 %20 to i32
   %22 = mul i64 %vector.size.i, 8
   br label %scalar_kernel_entry.i
 
 scalar_kernel_entry.i:                            ; preds = %scalar_kernel_entry.i, %dim_0_pre_head.i
   %asr.iv12 = phi i64 [ %asr.iv.next13, %scalar_kernel_entry.i ], [ %22, %dim_0_pre_head.i ]
-  %23 = addrspacecast i8* %asr.iv6 to i32 addrspace(1)*
-  %24 = addrspacecast i8* %ptrtoarg4 to i32 addrspace(1)*
-  %scevgep16 = getelementptr i32, i32 addrspace(1)* %23, i64 %asr.iv12
-  %25 = load i32, i32 addrspace(1)* %scevgep16, align 4
-  %26 = atomicrmw min i32 addrspace(1)* %24, i32 %25 seq_cst
-  %scevgep15 = getelementptr i32, i32 addrspace(1)* %23, i64 %asr.iv12
-  store i32 %21, i32 addrspace(1)* %scevgep15, align 4
+  %23 = addrspacecast ptr %asr.iv6 to ptr addrspace(1)
+  %24 = addrspacecast ptr %ptrtoarg4 to ptr addrspace(1)
+  %scevgep16 = getelementptr i32, ptr addrspace(1) %23, i64 %asr.iv12
+  %25 = load i32, ptr addrspace(1) %scevgep16, align 4
+  %26 = atomicrmw min ptr addrspace(1) %24, i32 %25 seq_cst
+  %scevgep15 = getelementptr i32, ptr addrspace(1) %23, i64 %asr.iv12
+  store i32 %21, ptr addrspace(1) %scevgep15, align 4
   %asr.iv.next13 = add i64 %asr.iv12, 1
   %dim_0_cmp.to.max.i = icmp eq i64 %5, %asr.iv.next13
   br i1 %dim_0_cmp.to.max.i, label %test.exit, label %scalar_kernel_entry.i
 
 test.exit:                     ; preds = %scalar_kernel_entry.i, %scalarIf.i
-  %27 = bitcast i8* %asr.iv6 to i1*
+  %27 = bitcast ptr %asr.iv6 to ptr
   %28 = add i64 %iv, %d
-  store i64 %28, i64* %b, align 8
-  %scevgep8 = getelementptr i1, i1* %27, i64 %2
-  %29 = bitcast i1* %scevgep8 to i8*
+  store i64 %28, ptr %b, align 8
+  %scevgep8 = getelementptr i1, ptr %27, i64 %2
+  %29 = bitcast ptr %scevgep8 to ptr
   br label %loop.cond
 
 return:                                           ; preds = %loop.cond
-  store i64 %0, i64* %b, align 8
+  store i64 %0, ptr %b, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/atomic-nocx16.ll b/llvm/test/CodeGen/X86/atomic-nocx16.ll
index a014da80f189b..c854a21d30bc9 100644
--- a/llvm/test/CodeGen/X86/atomic-nocx16.ll
+++ b/llvm/test/CodeGen/X86/atomic-nocx16.ll
@@ -44,22 +44,22 @@ entry:
 }
 
 ; CHECK-LABEL: test_fp:
-define void @test_fp(fp128* %a) nounwind {
+define void @test_fp(ptr %a) nounwind {
 entry:
 ; CHECK: __atomic_exchange_16
 ; CHECK32: __atomic_exchange
-  %0 = atomicrmw xchg fp128* %a, fp128 0xL00000000000000004000900000000000 seq_cst
+  %0 = atomicrmw xchg ptr %a, fp128 0xL00000000000000004000900000000000 seq_cst
 ; CHECK: __atomic_compare_exchange_16
 ; CHECK32: __atomic_compare_exchange
-  %1 = atomicrmw fadd fp128* %a, fp128 0xL00000000000000004000900000000000 seq_cst
+  %1 = atomicrmw fadd ptr %a, fp128 0xL00000000000000004000900000000000 seq_cst
 ; CHECK: __atomic_compare_exchange_16
 ; CHECK32: __atomic_compare_exchange
-  %2 = atomicrmw fsub fp128* %a, fp128 0xL00000000000000004000900000000000 seq_cst
+  %2 = atomicrmw fsub ptr %a, fp128 0xL00000000000000004000900000000000 seq_cst
 ; CHECK: __atomic_load_16
 ; CHECK32: __atomic_load
-  %3 = load atomic fp128, fp128* %a seq_cst, align 16
+  %3 = load atomic fp128, ptr %a seq_cst, align 16
 ; CHECK: __atomic_store_16
 ; CHECK32: __atomic_store
-  store atomic fp128 %3, fp128* %a seq_cst, align 16
+  store atomic fp128 %3, ptr %a seq_cst, align 16
   ret void
 }

diff  --git a/llvm/test/CodeGen/X86/avoid-sfb-g-no-change.mir b/llvm/test/CodeGen/X86/avoid-sfb-g-no-change.mir
index fee6de870e3d6..679a908893f6e 100644
--- a/llvm/test/CodeGen/X86/avoid-sfb-g-no-change.mir
+++ b/llvm/test/CodeGen/X86/avoid-sfb-g-no-change.mir
@@ -23,26 +23,26 @@
   target triple = "x86_64-unknown-unknown"
 
   ; Function Attrs: norecurse nounwind uwtable
-  define dso_local void @debug(<4 x float>* noalias nocapture %p1, <4 x float>* noalias nocapture %p2) local_unnamed_addr #0 !dbg !10 {
+  define dso_local void @debug(ptr noalias nocapture %p1, ptr noalias nocapture %p2) local_unnamed_addr #0 !dbg !10 {
   entry:
-    call void @llvm.dbg.value(metadata <4 x float>* %p1, metadata !21, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata <4 x float>* %p2, metadata !22, metadata !DIExpression()), !dbg !25
-    %0 = bitcast <4 x float>* %p1 to i8*, !dbg !26
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    store i8 0, i8* %0, align 1, !dbg !27
-    %1 = load <4 x float>, <4 x float>* %p1, align 16, !dbg !28
+    call void @llvm.dbg.value(metadata ptr %p1, metadata !21, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %p2, metadata !22, metadata !DIExpression()), !dbg !25
+    %0 = bitcast ptr %p1 to ptr, !dbg !26
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    store i8 0, ptr %0, align 1, !dbg !27
+    %1 = load <4 x float>, ptr %p1, align 16, !dbg !28
     call void @llvm.dbg.value(metadata <4 x float> %1, metadata !24, metadata !DIExpression()), !dbg !25
-    store <4 x float> %1, <4 x float>* %p2, align 16, !dbg !29
+    store <4 x float> %1, ptr %p2, align 16, !dbg !29
     ret void, !dbg !30
   }
 
   ; Function Attrs: norecurse nounwind uwtable
-  define dso_local void @nodebug(<4 x float>* noalias nocapture %p1, <4 x float>* noalias nocapture %p2) local_unnamed_addr #0 {
+  define dso_local void @nodebug(ptr noalias nocapture %p1, ptr noalias nocapture %p2) local_unnamed_addr #0 {
   entry:
-    %0 = bitcast <4 x float>* %p1 to i8*
-    store i8 0, i8* %0, align 1
-    %1 = load <4 x float>, <4 x float>* %p1, align 16
-    store <4 x float> %1, <4 x float>* %p2, align 16
+    %0 = bitcast ptr %p1 to ptr
+    store i8 0, ptr %0, align 1
+    %1 = load <4 x float>, ptr %p1, align 16
+    store <4 x float> %1, ptr %p2, align 16
     ret void
   }
 
@@ -50,7 +50,7 @@
   declare void @llvm.dbg.value(metadata, metadata, metadata) #1
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { norecurse nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { nounwind readnone speculatable }

diff  --git a/llvm/test/CodeGen/X86/avoid-sfb-g-no-change2.mir b/llvm/test/CodeGen/X86/avoid-sfb-g-no-change2.mir
index 4b54a0e1b09ee..4da4f03987672 100644
--- a/llvm/test/CodeGen/X86/avoid-sfb-g-no-change2.mir
+++ b/llvm/test/CodeGen/X86/avoid-sfb-g-no-change2.mir
@@ -26,34 +26,34 @@
   target triple = "x86_64-unknown-unknown"
 
   ; Function Attrs: norecurse nounwind uwtable
-  define dso_local void @debug(<4 x float>* noalias nocapture %p1, <4 x float>* noalias nocapture %p2) local_unnamed_addr #0 !dbg !10 {
+  define dso_local void @debug(ptr noalias nocapture %p1, ptr noalias nocapture %p2) local_unnamed_addr #0 !dbg !10 {
   entry:
-    call void @llvm.dbg.value(metadata <4 x float>* %p1, metadata !21, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata <4 x float>* %p2, metadata !22, metadata !DIExpression()), !dbg !25
-    %0 = bitcast <4 x float>* %p1 to i8*, !dbg !26
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    store i8 0, i8* %0, align 1, !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !25
-    %1 = load <4 x float>, <4 x float>* %p1, align 16, !dbg !28
+    call void @llvm.dbg.value(metadata ptr %p1, metadata !21, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %p2, metadata !22, metadata !DIExpression()), !dbg !25
+    %0 = bitcast ptr %p1 to ptr, !dbg !26
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    store i8 0, ptr %0, align 1, !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !25
+    %1 = load <4 x float>, ptr %p1, align 16, !dbg !28
     call void @llvm.dbg.value(metadata <4 x float> %1, metadata !24, metadata !DIExpression()), !dbg !25
-    store <4 x float> %1, <4 x float>* %p2, align 16, !dbg !29
+    store <4 x float> %1, ptr %p2, align 16, !dbg !29
     ret void, !dbg !30
   }
 
@@ -61,7 +61,7 @@
   declare void @llvm.dbg.value(metadata, metadata, metadata) #1
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { norecurse nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { nounwind readnone speculatable }

diff  --git a/llvm/test/CodeGen/X86/avoid-sfb-g-no-change3.mir b/llvm/test/CodeGen/X86/avoid-sfb-g-no-change3.mir
index 7cd54abb7b3bb..19e3f38c56b78 100644
--- a/llvm/test/CodeGen/X86/avoid-sfb-g-no-change3.mir
+++ b/llvm/test/CodeGen/X86/avoid-sfb-g-no-change3.mir
@@ -28,39 +28,39 @@
   target triple = "x86_64-unknown-unknown"
   
   ; Function Attrs: norecurse nounwind uwtable
-  define dso_local void @debug(<4 x float>* noalias %p1, <4 x float>* noalias nocapture %p2) local_unnamed_addr #0 !dbg !10 {
+  define dso_local void @debug(ptr noalias %p1, ptr noalias nocapture %p2) local_unnamed_addr #0 !dbg !10 {
   entry:
-    call void @llvm.dbg.value(metadata <4 x float>* %p1, metadata !21, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata <4 x float>* %p2, metadata !22, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata <4 x float>* %p1, metadata !23, metadata !DIExpression()), !dbg !27
-    %tobool = icmp eq <4 x float>* %p1, null, !dbg !28
-    %0 = bitcast <4 x float>* %p1 to i8*, !dbg !29
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    store i8 0, i8* %0, align 1, !dbg !30
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
-    call void @llvm.dbg.value(metadata i8* %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %p1, metadata !21, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %p2, metadata !22, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %p1, metadata !23, metadata !DIExpression()), !dbg !27
+    %tobool = icmp eq ptr %p1, null, !dbg !28
+    %0 = bitcast ptr %p1 to ptr, !dbg !29
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    store i8 0, ptr %0, align 1, !dbg !30
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
+    call void @llvm.dbg.value(metadata ptr %0, metadata !23, metadata !DIExpression()), !dbg !27
     br i1 %tobool, label %if.end, label %if.then, !dbg !31
   
   if.then:                                          ; preds = %entry
-    %1 = load <4 x float>, <4 x float>* %p1, align 16, !dbg !32
+    %1 = load <4 x float>, ptr %p1, align 16, !dbg !32
     call void @llvm.dbg.value(metadata <4 x float> %1, metadata !24, metadata !DIExpression()), !dbg !33
-    store <4 x float> %1, <4 x float>* %p2, align 16, !dbg !34
+    store <4 x float> %1, ptr %p2, align 16, !dbg !34
     br label %if.end, !dbg !35
   
   if.end:                                           ; preds = %if.then, %entry
@@ -71,7 +71,7 @@
   declare void @llvm.dbg.value(metadata, metadata, metadata) #1
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
   
   attributes #0 = { norecurse nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { nounwind readnone speculatable }

diff  --git a/llvm/test/CodeGen/X86/avoid-sfb-kill-flags.mir b/llvm/test/CodeGen/X86/avoid-sfb-kill-flags.mir
index 14466ec271373..4e9e3c9d3efa1 100644
--- a/llvm/test/CodeGen/X86/avoid-sfb-kill-flags.mir
+++ b/llvm/test/CodeGen/X86/avoid-sfb-kill-flags.mir
@@ -8,22 +8,22 @@
   %struct.S = type { i32, i32, i32, i32 }
 
   ; Function Attrs: nounwind uwtable
-  define void @test_imm_store(%struct.S* noalias nocapture %s1, %struct.S* nocapture %s2, i32 %x, %struct.S* nocapture %s3) local_unnamed_addr #0 {
+  define void @test_imm_store(ptr noalias nocapture %s1, ptr nocapture %s2, i32 %x, ptr nocapture %s3) local_unnamed_addr #0 {
   entry:
-    %a2 = bitcast %struct.S* %s1 to i32*
-    store i32 0, i32* %a2, align 4
-    %a13 = bitcast %struct.S* %s3 to i32*
-    store i32 1, i32* %a13, align 4
-    %0 = bitcast %struct.S* %s2 to i8*
-    %1 = bitcast %struct.S* %s1 to i8*
-    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 16, i1 false)
+    %a2 = bitcast ptr %s1 to ptr
+    store i32 0, ptr %a2, align 4
+    %a13 = bitcast ptr %s3 to ptr
+    store i32 1, ptr %a13, align 4
+    %0 = bitcast ptr %s2 to ptr
+    %1 = bitcast ptr %s1 to ptr
+    call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 16, i1 false)
     ret void
   }
 
-  declare void @bar(%struct.S*) local_unnamed_addr
+  declare void @bar(ptr) local_unnamed_addr
 
   ; Function Attrs: argmemonly nounwind
-  declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
+  declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
 
 ...
 ---

diff  --git a/llvm/test/CodeGen/X86/avoid-sfb-offset.mir b/llvm/test/CodeGen/X86/avoid-sfb-offset.mir
index 6fd3f45d2942d..978b3e79dcb25 100644
--- a/llvm/test/CodeGen/X86/avoid-sfb-offset.mir
+++ b/llvm/test/CodeGen/X86/avoid-sfb-offset.mir
@@ -11,22 +11,22 @@
   entry:
     %a = alloca [36 x i32], align 16
     %z = alloca [36 x i32], align 16
-    %0 = bitcast [36 x i32]* %z to i8*
-    %scevgep = getelementptr inbounds [36 x i32], [36 x i32]* %a, i64 0, i64 1
-    %scevgep40 = bitcast i32* %scevgep to i8*
-    %arrayidx.9 = getelementptr inbounds [36 x i32], [36 x i32]* %a, i64 0, i64 9
-    %1 = load i32, i32* %arrayidx.9, align 4
+    %0 = bitcast ptr %z to ptr
+    %scevgep = getelementptr inbounds [36 x i32], ptr %a, i64 0, i64 1
+    %scevgep40 = bitcast ptr %scevgep to ptr
+    %arrayidx.9 = getelementptr inbounds [36 x i32], ptr %a, i64 0, i64 9
+    %1 = load i32, ptr %arrayidx.9, align 4
     %add.9 = add i32 %1, 9
-    store i32 %add.9, i32* %arrayidx.9, align 4
-    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 16 %0, i8* nonnull align 4 %scevgep40, i64 136, i1 false)
+    store i32 %add.9, ptr %arrayidx.9, align 4
+    call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 16 %0, ptr nonnull align 4 %scevgep40, i64 136, i1 false)
     ret i32 %1
   }
 
   ; Function Attrs: argmemonly nounwind
-  declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
+  declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { "target-cpu"="core-avx2" }
   attributes #1 = { argmemonly nounwind "target-cpu"="core-avx2" }

diff  --git a/llvm/test/CodeGen/X86/avx512f-256-set0.mir b/llvm/test/CodeGen/X86/avx512f-256-set0.mir
index 48a355c20a7ad..3915599df524d 100644
--- a/llvm/test/CodeGen/X86/avx512f-256-set0.mir
+++ b/llvm/test/CodeGen/X86/avx512f-256-set0.mir
@@ -14,9 +14,9 @@
 
   define void @main() #0 {
   bb0:
-    %gep1 = bitcast [4 x i64]* @tst_ to [4 x i64]*
-    %lsr.iv1 = bitcast [4 x i64]* %gep1 to <4 x i64>*
-    store <4 x i64> zeroinitializer, <4 x i64>* %lsr.iv1, align 16
+    %gep1 = bitcast ptr @tst_ to ptr
+    %lsr.iv1 = bitcast ptr %gep1 to ptr
+    store <4 x i64> zeroinitializer, ptr %lsr.iv1, align 16
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll b/llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll
index 6354e2ec6889f..b897cf4853cac 100644
--- a/llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll
+++ b/llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll
@@ -4,7 +4,7 @@
 ; RUN: echo '!!0 2' >> %t
 ; RUN: llc < %s -mtriple=x86_64 -basic-block-address-map -basic-block-sections=%t | FileCheck %s
 
-define void @_Z3bazb(i1 zeroext) personality i32 (...)* @__gxx_personality_v0 {
+define void @_Z3bazb(i1 zeroext) personality ptr @__gxx_personality_v0 {
   br i1 %0, label %2, label %7
 
 2:
@@ -13,8 +13,8 @@ define void @_Z3bazb(i1 zeroext) personality i32 (...)* @__gxx_personality_v0 {
   br label %9
 
 5:
-  landingpad { i8*, i32 }
-          catch i8* null
+  landingpad { ptr, i32 }
+          catch ptr null
   br label %9
 
 7:

diff  --git a/llvm/test/CodeGen/X86/basic-block-labels-mir-parse.mir b/llvm/test/CodeGen/X86/basic-block-labels-mir-parse.mir
index f11707c719895..6408f0a30af7e 100644
--- a/llvm/test/CodeGen/X86/basic-block-labels-mir-parse.mir
+++ b/llvm/test/CodeGen/X86/basic-block-labels-mir-parse.mir
@@ -23,21 +23,21 @@
     %2 = alloca i32, align 4
     %3 = alloca i8, align 1
     %4 = zext i1 %0 to i8
-    store i8 %4, i8* %3, align 1
-    %5 = load i8, i8* %3, align 1
+    store i8 %4, ptr %3, align 1
+    %5 = load i8, ptr %3, align 1
     %6 = trunc i8 %5 to i1
     br i1 %6, label %7, label %8
   
   7:                                                ; preds = %1
-    store i32 1, i32* %2, align 4
+    store i32 1, ptr %2, align 4
     br label %9
   
   8:                                                ; preds = %1
-    store i32 0, i32* %2, align 4
+    store i32 0, ptr %2, align 4
     br label %9
   
   9:                                                ; preds = %8, %7
-    %10 = load i32, i32* %2, align 4
+    %10 = load i32, ptr %2, align 4
     ret i32 %10
   }
   

diff  --git a/llvm/test/CodeGen/X86/basic-block-sections-module1.ll b/llvm/test/CodeGen/X86/basic-block-sections-module1.ll
index 9c1f7d15f10da..c719b7732648b 100644
--- a/llvm/test/CodeGen/X86/basic-block-sections-module1.ll
+++ b/llvm/test/CodeGen/X86/basic-block-sections-module1.ll
@@ -42,12 +42,12 @@
 define dso_local i32 @test(i32 noundef %0) #0 !dbg !10 {
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
-  store i32 %0, i32* %3, align 4
-  %4 = load i32, i32* %3, align 4
+  store i32 %0, ptr %3, align 4
+  %4 = load i32, ptr %3, align 4
   %5 = icmp slt i32 %4, 0
   br i1 %5, label %6, label %7
 6:                                                ; preds = %1
-  store i32 -1, i32* %2, align 4
+  store i32 -1, ptr %2, align 4
   ret i32 0
 7:
   ret i32 1

diff  --git a/llvm/test/CodeGen/X86/basic-block-sections-module2.ll b/llvm/test/CodeGen/X86/basic-block-sections-module2.ll
index 6964fcedb32a0..44e7d2219170c 100644
--- a/llvm/test/CodeGen/X86/basic-block-sections-module2.ll
+++ b/llvm/test/CodeGen/X86/basic-block-sections-module2.ll
@@ -27,12 +27,12 @@
 define dso_local i32 @test1(i32 noundef %0) #0 !dbg !10 {
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
-  store i32 %0, i32* %3, align 4
-  %4 = load i32, i32* %3, align 4
+  store i32 %0, ptr %3, align 4
+  %4 = load i32, ptr %3, align 4
   %5 = icmp slt i32 %4, 0
   br i1 %5, label %6, label %7
 6:                                                ; preds = %1
-  store i32 -1, i32* %2, align 4
+  store i32 -1, ptr %2, align 4
   ret i32 0
 7:
   ret i32 1
@@ -41,12 +41,12 @@ define dso_local i32 @test1(i32 noundef %0) #0 !dbg !10 {
 define dso_local i32 @test2(i32 noundef %0) #0 !dbg !11 {
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
-  store i32 %0, i32* %3, align 4
-  %4 = load i32, i32* %3, align 4
+  store i32 %0, ptr %3, align 4
+  %4 = load i32, ptr %3, align 4
   %5 = icmp slt i32 %4, 0
   br i1 %5, label %6, label %7
 6:                                                ; preds = %1
-  store i32 -1, i32* %2, align 4
+  store i32 -1, ptr %2, align 4
   ret i32 0
 7:
   ret i32 1
@@ -55,12 +55,12 @@ define dso_local i32 @test2(i32 noundef %0) #0 !dbg !11 {
 define dso_local i32 @test3(i32 noundef %0) #0 !dbg !12 {
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
-  store i32 %0, i32* %3, align 4
-  %4 = load i32, i32* %3, align 4
+  store i32 %0, ptr %3, align 4
+  %4 = load i32, ptr %3, align 4
   %5 = icmp slt i32 %4, 0
   br i1 %5, label %6, label %7
 6:                                                ; preds = %1
-  store i32 -1, i32* %2, align 4
+  store i32 -1, ptr %2, align 4
   ret i32 0
 7:
   ret i32 1
@@ -69,12 +69,12 @@ define dso_local i32 @test3(i32 noundef %0) #0 !dbg !12 {
 define dso_local i32 @test4(i32 noundef %0) #0 !dbg !13 {
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
-  store i32 %0, i32* %3, align 4
-  %4 = load i32, i32* %3, align 4
+  store i32 %0, ptr %3, align 4
+  %4 = load i32, ptr %3, align 4
   %5 = icmp slt i32 %4, 0
   br i1 %5, label %6, label %7
 6:                                                ; preds = %1
-  store i32 -1, i32* %2, align 4
+  store i32 -1, ptr %2, align 4
   ret i32 0
 7:
   ret i32 1
@@ -83,12 +83,12 @@ define dso_local i32 @test4(i32 noundef %0) #0 !dbg !13 {
 define dso_local i32 @test5(i32 noundef %0) #0 {
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
-  store i32 %0, i32* %3, align 4
-  %4 = load i32, i32* %3, align 4
+  store i32 %0, ptr %3, align 4
+  %4 = load i32, ptr %3, align 4
   %5 = icmp slt i32 %4, 0
   br i1 %5, label %6, label %7
 6:                                                ; preds = %1
-  store i32 -1, i32* %2, align 4
+  store i32 -1, ptr %2, align 4
   ret i32 0
 7:
   ret i32 1

diff  --git a/llvm/test/CodeGen/X86/block-placement.ll b/llvm/test/CodeGen/X86/block-placement.ll
index 2f9635db34a33..b134d8a369634 100644
--- a/llvm/test/CodeGen/X86/block-placement.ll
+++ b/llvm/test/CodeGen/X86/block-placement.ll
@@ -3,7 +3,7 @@
 
 declare void @error(i32 %i, i32 %a, i32 %b)
 
-define i32 @test_ifchains(i32 %i, i32* %a, i32 %b) {
+define i32 @test_ifchains(i32 %i, ptr %a, i32 %b) {
 ; Test a chain of ifs, where the block guarded by the if is error handling code
 ; that is not expected to run.
 ; CHECK-LABEL: test_ifchains:
@@ -25,8 +25,8 @@ define i32 @test_ifchains(i32 %i, i32* %a, i32 %b) {
 ; CHECK: %then5
 
 entry:
-  %gep1 = getelementptr i32, i32* %a, i32 1
-  %val1 = load i32, i32* %gep1
+  %gep1 = getelementptr i32, ptr %a, i32 1
+  %val1 = load i32, ptr %gep1
   %cond1 = icmp ugt i32 %val1, 1
   br i1 %cond1, label %then1, label %else1, !prof !0
 
@@ -35,8 +35,8 @@ then1:
   br label %else1
 
 else1:
-  %gep2 = getelementptr i32, i32* %a, i32 2
-  %val2 = load i32, i32* %gep2
+  %gep2 = getelementptr i32, ptr %a, i32 2
+  %val2 = load i32, ptr %gep2
   %cond2 = icmp ugt i32 %val2, 2
   br i1 %cond2, label %then2, label %else2, !prof !0
 
@@ -45,8 +45,8 @@ then2:
   br label %else2
 
 else2:
-  %gep3 = getelementptr i32, i32* %a, i32 3
-  %val3 = load i32, i32* %gep3
+  %gep3 = getelementptr i32, ptr %a, i32 3
+  %val3 = load i32, ptr %gep3
   %cond3 = icmp ugt i32 %val3, 3
   br i1 %cond3, label %then3, label %else3, !prof !0
 
@@ -55,8 +55,8 @@ then3:
   br label %else3
 
 else3:
-  %gep4 = getelementptr i32, i32* %a, i32 4
-  %val4 = load i32, i32* %gep4
+  %gep4 = getelementptr i32, ptr %a, i32 4
+  %val4 = load i32, ptr %gep4
   %cond4 = icmp ugt i32 %val4, 4
   br i1 %cond4, label %then4, label %else4, !prof !0
 
@@ -65,8 +65,8 @@ then4:
   br label %else4
 
 else4:
-  %gep5 = getelementptr i32, i32* %a, i32 3
-  %val5 = load i32, i32* %gep5
+  %gep5 = getelementptr i32, ptr %a, i32 3
+  %val5 = load i32, ptr %gep5
   %cond5 = icmp ugt i32 %val5, 3
   br i1 %cond5, label %then5, label %exit, !prof !0
 
@@ -78,7 +78,7 @@ exit:
   ret i32 %b
 }
 
-define i32 @test_loop_cold_blocks(i32 %i, i32* %a) {
+define i32 @test_loop_cold_blocks(i32 %i, ptr %a) {
 ; Check that we sink cold loop blocks after the hot loop body.
 ; CHECK-LABEL: test_loop_cold_blocks:
 ; CHECK: %entry
@@ -114,8 +114,8 @@ unlikely2:
   br label %body3
 
 body3:
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
-  %0 = load i32, i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %iv
+  %0 = load i32, ptr %arrayidx
   %sum = add nsw i32 %0, %base
   %next = add i32 %iv, 1
   %exitcond = icmp eq i32 %next, %i
@@ -127,7 +127,7 @@ exit:
 
 !0 = !{!"branch_weights", i32 1, i32 64}
 
-define i32 @test_loop_early_exits(i32 %i, i32* %a) {
+define i32 @test_loop_early_exits(i32 %i, ptr %a) {
 ; Check that we sink early exit blocks out of loop bodies.
 ; CHECK-LABEL: test_loop_early_exits:
 ; CHECK: %entry
@@ -167,8 +167,8 @@ bail3:
   ret i32 -3
 
 body4:
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
-  %0 = load i32, i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %iv
+  %0 = load i32, ptr %arrayidx
   %sum = add nsw i32 %0, %base
   %next = add i32 %iv, 1
   %exitcond = icmp eq i32 %next, %i
@@ -184,7 +184,7 @@ exit:
 ; duplicated, we add some calls to dummy.
 declare void @dummy()
 
-define i32 @test_loop_rotate(i32 %i, i32* %a) {
+define i32 @test_loop_rotate(i32 %i, ptr %a) {
 ; Check that we rotate conditional exits from the loop to the bottom of the
 ; loop, eliminating unconditional branches to the top.
 ; CHECK-LABEL: test_loop_rotate:
@@ -206,8 +206,8 @@ body0:
   br i1 %exitcond, label %exit, label %body1
 
 body1:
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
-  %0 = load i32, i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %iv
+  %0 = load i32, ptr %arrayidx
   %sum = add nsw i32 %0, %base
   %bailcond1 = icmp eq i32 %sum, 42
   br label %body0
@@ -216,7 +216,7 @@ exit:
   ret i32 %base
 }
 
-define i32 @test_no_loop_rotate(i32 %i, i32* %a) {
+define i32 @test_no_loop_rotate(i32 %i, ptr %a) {
 ; Check that we don't try to rotate a loop which is already laid out with
 ; fallthrough opportunities into the top and out of the bottom.
 ; CHECK-LABEL: test_no_loop_rotate:
@@ -231,8 +231,8 @@ entry:
 body0:
   %iv = phi i32 [ 0, %entry ], [ %next, %body1 ]
   %base = phi i32 [ 0, %entry ], [ %sum, %body1 ]
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
-  %0 = load i32, i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %iv
+  %0 = load i32, ptr %arrayidx
   %sum = add nsw i32 %0, %base
   %bailcond1 = icmp eq i32 %sum, 42
   br i1 %bailcond1, label %exit, label %body1
@@ -246,7 +246,7 @@ exit:
   ret i32 %base
 }
 
-define i32 @test_loop_align(i32 %i, i32* %a) {
+define i32 @test_loop_align(i32 %i, ptr %a) {
 ; Check that we provide basic loop body alignment with the block placement
 ; pass.
 ; CHECK-LABEL: test_loop_align:
@@ -261,8 +261,8 @@ entry:
 body:
   %iv = phi i32 [ 0, %entry ], [ %next, %body ]
   %base = phi i32 [ 0, %entry ], [ %sum, %body ]
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
-  %0 = load i32, i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %iv
+  %0 = load i32, ptr %arrayidx
   %sum = add nsw i32 %0, %base
   %next = add i32 %iv, 1
   %exitcond = icmp eq i32 %next, %i
@@ -272,7 +272,7 @@ exit:
   ret i32 %sum
 }
 
-define i32 @test_nested_loop_align(i32 %i, i32* %a, i32* %b) {
+define i32 @test_nested_loop_align(i32 %i, ptr %a, ptr %b) {
 ; Check that we provide nested loop body alignment.
 ; CHECK-LABEL: test_nested_loop_align:
 ; CHECK: %entry
@@ -288,16 +288,16 @@ entry:
 
 loop.body.1:
   %iv = phi i32 [ 0, %entry ], [ %next, %loop.body.2 ]
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
-  %bidx = load i32, i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %iv
+  %bidx = load i32, ptr %arrayidx
   br label %inner.loop.body
 
 inner.loop.body:
   %inner.iv = phi i32 [ 0, %loop.body.1 ], [ %inner.next, %inner.loop.body ]
   %base = phi i32 [ 0, %loop.body.1 ], [ %sum, %inner.loop.body ]
   %scaled_idx = mul i32 %bidx, %iv
-  %inner.arrayidx = getelementptr inbounds i32, i32* %b, i32 %scaled_idx
-  %0 = load i32, i32* %inner.arrayidx
+  %inner.arrayidx = getelementptr inbounds i32, ptr %b, i32 %scaled_idx
+  %0 = load i32, ptr %inner.arrayidx
   %sum = add nsw i32 %0, %base
   %inner.next = add i32 %iv, 1
   %inner.exitcond = icmp eq i32 %inner.next, %i
@@ -330,13 +330,13 @@ loop.body1:
   br i1 undef, label %loop.body3, label %loop.body2
 
 loop.body2:
-  %ptr = load i32*, i32** undef, align 4
+  %ptr = load ptr, ptr undef, align 4
   br label %loop.body3
 
 loop.body3:
-  %myptr = phi i32* [ %ptr2, %loop.body5 ], [ %ptr, %loop.body2 ], [ undef, %loop.body1 ]
-  %bcmyptr = bitcast i32* %myptr to i32*
-  %val = load i32, i32* %bcmyptr, align 4
+  %myptr = phi ptr [ %ptr2, %loop.body5 ], [ %ptr, %loop.body2 ], [ undef, %loop.body1 ]
+  %bcmyptr = bitcast ptr %myptr to ptr
+  %val = load i32, ptr %bcmyptr, align 4
   %comp = icmp eq i32 %val, 48
   br i1 %comp, label %loop.body4, label %loop.body5
 
@@ -344,11 +344,11 @@ loop.body4:
   br i1 undef, label %loop.header, label %loop.body5
 
 loop.body5:
-  %ptr2 = load i32*, i32** undef, align 4
+  %ptr2 = load ptr, ptr undef, align 4
   br label %loop.body3
 }
 
-define void @unnatural_cfg2(i32* %p0, i32 %a0) {
+define void @unnatural_cfg2(ptr %p0, i32 %a0) {
 ; Test that we can handle a loop with a nested natural loop *and* an unnatural
 ; loop. This was reduced from a crash on block placement when run over
 ; single-source GCC.
@@ -368,32 +368,32 @@ entry:
   br label %loop.header
 
 loop.header:
-  %comp0 = icmp eq i32* %p0, null
+  %comp0 = icmp eq ptr %p0, null
   br i1 %comp0, label %bail, label %loop.body1
 
 loop.body1:
-  %val0 = load i32*, i32** undef, align 4
+  %val0 = load ptr, ptr undef, align 4
   br i1 undef, label %loop.body2, label %loop.inner1.begin
 
 loop.body2:
   br i1 undef, label %loop.body4, label %loop.body3
 
 loop.body3:
-  %ptr1 = getelementptr inbounds i32, i32* %val0, i32 0
-  %castptr1 = bitcast i32* %ptr1 to i32**
-  %val1 = load i32*, i32** %castptr1, align 4
+  %ptr1 = getelementptr inbounds i32, ptr %val0, i32 0
+  %castptr1 = bitcast ptr %ptr1 to ptr
+  %val1 = load ptr, ptr %castptr1, align 4
   br label %loop.inner1.begin
 
 loop.inner1.begin:
-  %valphi = phi i32* [ %val2, %loop.inner1.end ], [ %val1, %loop.body3 ], [ %val0, %loop.body1 ]
-  %castval = bitcast i32* %valphi to i32*
+  %valphi = phi ptr [ %val2, %loop.inner1.end ], [ %val1, %loop.body3 ], [ %val0, %loop.body1 ]
+  %castval = bitcast ptr %valphi to ptr
   %comp1 = icmp eq i32 %a0, 48
   br i1 %comp1, label %loop.inner1.end, label %loop.body4
 
 loop.inner1.end:
-  %ptr2 = getelementptr inbounds i32, i32* %valphi, i32 0
-  %castptr2 = bitcast i32* %ptr2 to i32**
-  %val2 = load i32*, i32** %castptr2, align 4
+  %ptr2 = getelementptr inbounds i32, ptr %valphi, i32 0
+  %castptr2 = bitcast ptr %ptr2 to ptr
+  %val2 = load ptr, ptr %castptr2, align 4
   br label %loop.inner1.begin
 
 loop.body4.dead:
@@ -490,7 +490,7 @@ entry:
   br i1 %cond, label %entry.if.then_crit_edge, label %lor.lhs.false, !prof !1
 
 entry.if.then_crit_edge:
-  %.pre14 = load i8, i8* undef, align 1
+  %.pre14 = load i8, ptr undef, align 1
   br label %if.then
 
 lor.lhs.false:
@@ -503,7 +503,7 @@ exit:
 if.then:
   %0 = phi i8 [ %.pre14, %entry.if.then_crit_edge ], [ undef, %exit ]
   %1 = and i8 %0, 1
-  store i8 %1, i8* undef, align 4
+  store i8 %1, ptr undef, align 4
   br label %if.end
 
 if.end:
@@ -551,7 +551,7 @@ exit:
 
 declare i32 @__gxx_personality_v0(...)
 
-define void @test_eh_lpad_successor() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @test_eh_lpad_successor() personality ptr @__gxx_personality_v0 {
 ; Sometimes the landing pad ends up as the first successor of an invoke block.
 ; When this happens, a strange result used to fall out of updateTerminators: we
 ; didn't correctly locate the fallthrough successor, assuming blindly that the
@@ -569,9 +569,9 @@ preheader:
   br label %loop
 
 lpad:
-  %lpad.val = landingpad { i8*, i32 }
+  %lpad.val = landingpad { ptr, i32 }
           cleanup
-  resume { i8*, i32 } %lpad.val
+  resume { ptr, i32 } %lpad.val
 
 loop:
   br label %loop
@@ -579,7 +579,7 @@ loop:
 
 declare void @fake_throw() noreturn
 
-define void @test_eh_throw() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @test_eh_throw() personality ptr @__gxx_personality_v0 {
 ; For blocks containing a 'throw' (or similar functionality), we have
 ; a no-return invoke. In this case, only EH successors will exist, and
 ; fallthrough simply won't occur. Make sure we don't crash trying to update
@@ -596,7 +596,7 @@ continue:
   unreachable
 
 cleanup:
-  %0 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
   unreachable
 }
@@ -619,24 +619,24 @@ body:
   br label %loop2a
 
 loop1:
-  %next.load = load i32*, i32** undef
+  %next.load = load ptr, ptr undef
   br i1 %comp.a, label %loop2a, label %loop2b
 
 loop2a:
-  %var = phi i32* [ null, %entry ], [ null, %body ], [ %next.phi, %loop1 ]
-  %next.var = phi i32* [ null, %entry ], [ undef, %body ], [ %next.load, %loop1 ]
-  %comp.a = icmp eq i32* %var, null
+  %var = phi ptr [ null, %entry ], [ null, %body ], [ %next.phi, %loop1 ]
+  %next.var = phi ptr [ null, %entry ], [ undef, %body ], [ %next.load, %loop1 ]
+  %comp.a = icmp eq ptr %var, null
   br label %loop3
 
 loop2b:
-  %gep = getelementptr inbounds i32, i32* %var.phi, i32 0
-  %next.ptr = bitcast i32* %gep to i32**
-  store i32* %next.phi, i32** %next.ptr
+  %gep = getelementptr inbounds i32, ptr %var.phi, i32 0
+  %next.ptr = bitcast ptr %gep to ptr
+  store ptr %next.phi, ptr %next.ptr
   br label %loop3
 
 loop3:
-  %var.phi = phi i32* [ %next.phi, %loop2b ], [ %var, %loop2a ]
-  %next.phi = phi i32* [ %next.load, %loop2b ], [ %next.var, %loop2a ]
+  %var.phi = phi ptr [ %next.phi, %loop2b ], [ %var, %loop2a ]
+  %next.phi = phi ptr [ %next.load, %loop2b ], [ %next.var, %loop2a ]
   br label %loop1
 }
 
@@ -734,199 +734,199 @@ define void @many_unanalyzable_branches() {
 entry:
   br label %0
 
-  %val0 = load volatile float, float* undef
+  %val0 = load volatile float, ptr undef
   %cmp0 = fcmp une float %val0, 0.0
   br i1 %cmp0, label %1, label %0
-  %val1 = load volatile float, float* undef
+  %val1 = load volatile float, ptr undef
   %cmp1 = fcmp une float %val1, 0.0
   br i1 %cmp1, label %2, label %1
-  %val2 = load volatile float, float* undef
+  %val2 = load volatile float, ptr undef
   %cmp2 = fcmp une float %val2, 0.0
   br i1 %cmp2, label %3, label %2
-  %val3 = load volatile float, float* undef
+  %val3 = load volatile float, ptr undef
   %cmp3 = fcmp une float %val3, 0.0
   br i1 %cmp3, label %4, label %3
-  %val4 = load volatile float, float* undef
+  %val4 = load volatile float, ptr undef
   %cmp4 = fcmp une float %val4, 0.0
   br i1 %cmp4, label %5, label %4
-  %val5 = load volatile float, float* undef
+  %val5 = load volatile float, ptr undef
   %cmp5 = fcmp une float %val5, 0.0
   br i1 %cmp5, label %6, label %5
-  %val6 = load volatile float, float* undef
+  %val6 = load volatile float, ptr undef
   %cmp6 = fcmp une float %val6, 0.0
   br i1 %cmp6, label %7, label %6
-  %val7 = load volatile float, float* undef
+  %val7 = load volatile float, ptr undef
   %cmp7 = fcmp une float %val7, 0.0
   br i1 %cmp7, label %8, label %7
-  %val8 = load volatile float, float* undef
+  %val8 = load volatile float, ptr undef
   %cmp8 = fcmp une float %val8, 0.0
   br i1 %cmp8, label %9, label %8
-  %val9 = load volatile float, float* undef
+  %val9 = load volatile float, ptr undef
   %cmp9 = fcmp une float %val9, 0.0
   br i1 %cmp9, label %10, label %9
-  %val10 = load volatile float, float* undef
+  %val10 = load volatile float, ptr undef
   %cmp10 = fcmp une float %val10, 0.0
   br i1 %cmp10, label %11, label %10
-  %val11 = load volatile float, float* undef
+  %val11 = load volatile float, ptr undef
   %cmp11 = fcmp une float %val11, 0.0
   br i1 %cmp11, label %12, label %11
-  %val12 = load volatile float, float* undef
+  %val12 = load volatile float, ptr undef
   %cmp12 = fcmp une float %val12, 0.0
   br i1 %cmp12, label %13, label %12
-  %val13 = load volatile float, float* undef
+  %val13 = load volatile float, ptr undef
   %cmp13 = fcmp une float %val13, 0.0
   br i1 %cmp13, label %14, label %13
-  %val14 = load volatile float, float* undef
+  %val14 = load volatile float, ptr undef
   %cmp14 = fcmp une float %val14, 0.0
   br i1 %cmp14, label %15, label %14
-  %val15 = load volatile float, float* undef
+  %val15 = load volatile float, ptr undef
   %cmp15 = fcmp une float %val15, 0.0
   br i1 %cmp15, label %16, label %15
-  %val16 = load volatile float, float* undef
+  %val16 = load volatile float, ptr undef
   %cmp16 = fcmp une float %val16, 0.0
   br i1 %cmp16, label %17, label %16
-  %val17 = load volatile float, float* undef
+  %val17 = load volatile float, ptr undef
   %cmp17 = fcmp une float %val17, 0.0
   br i1 %cmp17, label %18, label %17
-  %val18 = load volatile float, float* undef
+  %val18 = load volatile float, ptr undef
   %cmp18 = fcmp une float %val18, 0.0
   br i1 %cmp18, label %19, label %18
-  %val19 = load volatile float, float* undef
+  %val19 = load volatile float, ptr undef
   %cmp19 = fcmp une float %val19, 0.0
   br i1 %cmp19, label %20, label %19
-  %val20 = load volatile float, float* undef
+  %val20 = load volatile float, ptr undef
   %cmp20 = fcmp une float %val20, 0.0
   br i1 %cmp20, label %21, label %20
-  %val21 = load volatile float, float* undef
+  %val21 = load volatile float, ptr undef
   %cmp21 = fcmp une float %val21, 0.0
   br i1 %cmp21, label %22, label %21
-  %val22 = load volatile float, float* undef
+  %val22 = load volatile float, ptr undef
   %cmp22 = fcmp une float %val22, 0.0
   br i1 %cmp22, label %23, label %22
-  %val23 = load volatile float, float* undef
+  %val23 = load volatile float, ptr undef
   %cmp23 = fcmp une float %val23, 0.0
   br i1 %cmp23, label %24, label %23
-  %val24 = load volatile float, float* undef
+  %val24 = load volatile float, ptr undef
   %cmp24 = fcmp une float %val24, 0.0
   br i1 %cmp24, label %25, label %24
-  %val25 = load volatile float, float* undef
+  %val25 = load volatile float, ptr undef
   %cmp25 = fcmp une float %val25, 0.0
   br i1 %cmp25, label %26, label %25
-  %val26 = load volatile float, float* undef
+  %val26 = load volatile float, ptr undef
   %cmp26 = fcmp une float %val26, 0.0
   br i1 %cmp26, label %27, label %26
-  %val27 = load volatile float, float* undef
+  %val27 = load volatile float, ptr undef
   %cmp27 = fcmp une float %val27, 0.0
   br i1 %cmp27, label %28, label %27
-  %val28 = load volatile float, float* undef
+  %val28 = load volatile float, ptr undef
   %cmp28 = fcmp une float %val28, 0.0
   br i1 %cmp28, label %29, label %28
-  %val29 = load volatile float, float* undef
+  %val29 = load volatile float, ptr undef
   %cmp29 = fcmp une float %val29, 0.0
   br i1 %cmp29, label %30, label %29
-  %val30 = load volatile float, float* undef
+  %val30 = load volatile float, ptr undef
   %cmp30 = fcmp une float %val30, 0.0
   br i1 %cmp30, label %31, label %30
-  %val31 = load volatile float, float* undef
+  %val31 = load volatile float, ptr undef
   %cmp31 = fcmp une float %val31, 0.0
   br i1 %cmp31, label %32, label %31
-  %val32 = load volatile float, float* undef
+  %val32 = load volatile float, ptr undef
   %cmp32 = fcmp une float %val32, 0.0
   br i1 %cmp32, label %33, label %32
-  %val33 = load volatile float, float* undef
+  %val33 = load volatile float, ptr undef
   %cmp33 = fcmp une float %val33, 0.0
   br i1 %cmp33, label %34, label %33
-  %val34 = load volatile float, float* undef
+  %val34 = load volatile float, ptr undef
   %cmp34 = fcmp une float %val34, 0.0
   br i1 %cmp34, label %35, label %34
-  %val35 = load volatile float, float* undef
+  %val35 = load volatile float, ptr undef
   %cmp35 = fcmp une float %val35, 0.0
   br i1 %cmp35, label %36, label %35
-  %val36 = load volatile float, float* undef
+  %val36 = load volatile float, ptr undef
   %cmp36 = fcmp une float %val36, 0.0
   br i1 %cmp36, label %37, label %36
-  %val37 = load volatile float, float* undef
+  %val37 = load volatile float, ptr undef
   %cmp37 = fcmp une float %val37, 0.0
   br i1 %cmp37, label %38, label %37
-  %val38 = load volatile float, float* undef
+  %val38 = load volatile float, ptr undef
   %cmp38 = fcmp une float %val38, 0.0
   br i1 %cmp38, label %39, label %38
-  %val39 = load volatile float, float* undef
+  %val39 = load volatile float, ptr undef
   %cmp39 = fcmp une float %val39, 0.0
   br i1 %cmp39, label %40, label %39
-  %val40 = load volatile float, float* undef
+  %val40 = load volatile float, ptr undef
   %cmp40 = fcmp une float %val40, 0.0
   br i1 %cmp40, label %41, label %40
-  %val41 = load volatile float, float* undef
+  %val41 = load volatile float, ptr undef
   %cmp41 = fcmp une float %val41, undef
   br i1 %cmp41, label %42, label %41
-  %val42 = load volatile float, float* undef
+  %val42 = load volatile float, ptr undef
   %cmp42 = fcmp une float %val42, 0.0
   br i1 %cmp42, label %43, label %42
-  %val43 = load volatile float, float* undef
+  %val43 = load volatile float, ptr undef
   %cmp43 = fcmp une float %val43, 0.0
   br i1 %cmp43, label %44, label %43
-  %val44 = load volatile float, float* undef
+  %val44 = load volatile float, ptr undef
   %cmp44 = fcmp une float %val44, 0.0
   br i1 %cmp44, label %45, label %44
-  %val45 = load volatile float, float* undef
+  %val45 = load volatile float, ptr undef
   %cmp45 = fcmp une float %val45, 0.0
   br i1 %cmp45, label %46, label %45
-  %val46 = load volatile float, float* undef
+  %val46 = load volatile float, ptr undef
   %cmp46 = fcmp une float %val46, 0.0
   br i1 %cmp46, label %47, label %46
-  %val47 = load volatile float, float* undef
+  %val47 = load volatile float, ptr undef
   %cmp47 = fcmp une float %val47, 0.0
   br i1 %cmp47, label %48, label %47
-  %val48 = load volatile float, float* undef
+  %val48 = load volatile float, ptr undef
   %cmp48 = fcmp une float %val48, 0.0
   br i1 %cmp48, label %49, label %48
-  %val49 = load volatile float, float* undef
+  %val49 = load volatile float, ptr undef
   %cmp49 = fcmp une float %val49, 0.0
   br i1 %cmp49, label %50, label %49
-  %val50 = load volatile float, float* undef
+  %val50 = load volatile float, ptr undef
   %cmp50 = fcmp une float %val50, 0.0
   br i1 %cmp50, label %51, label %50
-  %val51 = load volatile float, float* undef
+  %val51 = load volatile float, ptr undef
   %cmp51 = fcmp une float %val51, 0.0
   br i1 %cmp51, label %52, label %51
-  %val52 = load volatile float, float* undef
+  %val52 = load volatile float, ptr undef
   %cmp52 = fcmp une float %val52, 0.0
   br i1 %cmp52, label %53, label %52
-  %val53 = load volatile float, float* undef
+  %val53 = load volatile float, ptr undef
   %cmp53 = fcmp une float %val53, 0.0
   br i1 %cmp53, label %54, label %53
-  %val54 = load volatile float, float* undef
+  %val54 = load volatile float, ptr undef
   %cmp54 = fcmp une float %val54, 0.0
   br i1 %cmp54, label %55, label %54
-  %val55 = load volatile float, float* undef
+  %val55 = load volatile float, ptr undef
   %cmp55 = fcmp une float %val55, 0.0
   br i1 %cmp55, label %56, label %55
-  %val56 = load volatile float, float* undef
+  %val56 = load volatile float, ptr undef
   %cmp56 = fcmp une float %val56, 0.0
   br i1 %cmp56, label %57, label %56
-  %val57 = load volatile float, float* undef
+  %val57 = load volatile float, ptr undef
   %cmp57 = fcmp une float %val57, 0.0
   br i1 %cmp57, label %58, label %57
-  %val58 = load volatile float, float* undef
+  %val58 = load volatile float, ptr undef
   %cmp58 = fcmp une float %val58, 0.0
   br i1 %cmp58, label %59, label %58
-  %val59 = load volatile float, float* undef
+  %val59 = load volatile float, ptr undef
   %cmp59 = fcmp une float %val59, 0.0
   br i1 %cmp59, label %60, label %59
-  %val60 = load volatile float, float* undef
+  %val60 = load volatile float, ptr undef
   %cmp60 = fcmp une float %val60, 0.0
   br i1 %cmp60, label %61, label %60
-  %val61 = load volatile float, float* undef
+  %val61 = load volatile float, ptr undef
   %cmp61 = fcmp une float %val61, 0.0
   br i1 %cmp61, label %62, label %61
-  %val62 = load volatile float, float* undef
+  %val62 = load volatile float, ptr undef
   %cmp62 = fcmp une float %val62, 0.0
   br i1 %cmp62, label %63, label %62
-  %val63 = load volatile float, float* undef
+  %val63 = load volatile float, ptr undef
   %cmp63 = fcmp une float %val63, 0.0
   br i1 %cmp63, label %64, label %63
-  %val64 = load volatile float, float* undef
+  %val64 = load volatile float, ptr undef
   %cmp64 = fcmp une float %val64, 0.0
   br i1 %cmp64, label %65, label %64
 
@@ -935,7 +935,7 @@ exit:
   ret void
 }
 
-define void @benchmark_heapsort(i32 %n, double* nocapture %ra) {
+define void @benchmark_heapsort(i32 %n, ptr nocapture %ra) {
 ; This test case comes from the heapsort benchmark, and exemplifies several
 ; important aspects of block placement in the presence of loops:
 ; 1) Loop rotation needs to *ensure* that the desired exiting edge can be
@@ -971,7 +971,7 @@ define void @benchmark_heapsort(i32 %n, double* nocapture %ra) {
 entry:
   %shr = ashr i32 %n, 1
   %add = add nsw i32 %shr, 1
-  %arrayidx3 = getelementptr inbounds double, double* %ra, i64 1
+  %arrayidx3 = getelementptr inbounds double, ptr %ra, i64 1
   br label %for.cond
 
 for.cond:
@@ -983,22 +983,22 @@ for.cond:
 if.then:
   %dec = add nsw i32 %l.0, -1
   %idxprom = sext i32 %dec to i64
-  %arrayidx = getelementptr inbounds double, double* %ra, i64 %idxprom
-  %0 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %ra, i64 %idxprom
+  %0 = load double, ptr %arrayidx, align 8
   br label %if.end10
 
 if.else:
   %idxprom1 = sext i32 %ir.0 to i64
-  %arrayidx2 = getelementptr inbounds double, double* %ra, i64 %idxprom1
-  %1 = load double, double* %arrayidx2, align 8
-  %2 = load double, double* %arrayidx3, align 8
-  store double %2, double* %arrayidx2, align 8
+  %arrayidx2 = getelementptr inbounds double, ptr %ra, i64 %idxprom1
+  %1 = load double, ptr %arrayidx2, align 8
+  %2 = load double, ptr %arrayidx3, align 8
+  store double %2, ptr %arrayidx2, align 8
   %dec6 = add nsw i32 %ir.0, -1
   %cmp7 = icmp eq i32 %dec6, 1
   br i1 %cmp7, label %if.then8, label %if.end10
 
 if.then8:
-  store double %1, double* %arrayidx3, align 8
+  store double %1, ptr %arrayidx3, align 8
   ret void
 
 if.end10:
@@ -1024,12 +1024,12 @@ while.body:
 
 land.lhs.true:
   %idxprom13 = sext i32 %j.0 to i64
-  %arrayidx14 = getelementptr inbounds double, double* %ra, i64 %idxprom13
-  %3 = load double, double* %arrayidx14, align 8
+  %arrayidx14 = getelementptr inbounds double, ptr %ra, i64 %idxprom13
+  %3 = load double, ptr %arrayidx14, align 8
   %add15 = add nsw i32 %j.0, 1
   %idxprom16 = sext i32 %add15 to i64
-  %arrayidx17 = getelementptr inbounds double, double* %ra, i64 %idxprom16
-  %4 = load double, double* %arrayidx17, align 8
+  %arrayidx17 = getelementptr inbounds double, ptr %ra, i64 %idxprom16
+  %4 = load double, ptr %arrayidx17, align 8
   %cmp18 = fcmp olt double %3, %4
   br i1 %cmp18, label %if.then19, label %if.end20
 
@@ -1039,27 +1039,27 @@ if.then19:
 if.end20:
   %j.1 = phi i32 [ %add15, %if.then19 ], [ %j.0, %land.lhs.true ], [ %j.0, %while.body ]
   %idxprom21 = sext i32 %j.1 to i64
-  %arrayidx22 = getelementptr inbounds double, double* %ra, i64 %idxprom21
-  %5 = load double, double* %arrayidx22, align 8
+  %arrayidx22 = getelementptr inbounds double, ptr %ra, i64 %idxprom21
+  %5 = load double, ptr %arrayidx22, align 8
   %cmp23 = fcmp olt double %rra.0, %5
   br i1 %cmp23, label %if.then24, label %while.cond
 
 if.then24:
   %idxprom27 = sext i32 %j.0.ph.in to i64
-  %arrayidx28 = getelementptr inbounds double, double* %ra, i64 %idxprom27
-  store double %5, double* %arrayidx28, align 8
+  %arrayidx28 = getelementptr inbounds double, ptr %ra, i64 %idxprom27
+  store double %5, ptr %arrayidx28, align 8
   br label %while.cond.outer
 
 while.end:
   %idxprom33 = sext i32 %j.0.ph.in to i64
-  %arrayidx34 = getelementptr inbounds double, double* %ra, i64 %idxprom33
-  store double %rra.0, double* %arrayidx34, align 8
+  %arrayidx34 = getelementptr inbounds double, ptr %ra, i64 %idxprom33
+  store double %rra.0, ptr %arrayidx34, align 8
   br label %for.cond
 }
 
 declare void @cold_function() cold
 
-define i32 @test_cold_calls(i32* %a) {
+define i32 @test_cold_calls(ptr %a) {
 ; Test that edges to blocks post-dominated by cold calls are
 ; marked as not expected to be taken.  They should be laid out
 ; at the bottom.
@@ -1070,8 +1070,8 @@ define i32 @test_cold_calls(i32* %a) {
 ; CHECK: %then
 
 entry:
-  %gep1 = getelementptr i32, i32* %a, i32 1
-  %val1 = load i32, i32* %gep1
+  %gep1 = getelementptr i32, ptr %a, i32 1
+  %val1 = load i32, ptr %gep1
   %cond1 = icmp ugt i32 %val1, 1
   br i1 %cond1, label %then, label %else
 
@@ -1080,8 +1080,8 @@ then:
   br label %exit
 
 else:
-  %gep2 = getelementptr i32, i32* %a, i32 2
-  %val2 = load i32, i32* %gep2
+  %gep2 = getelementptr i32, ptr %a, i32 2
+  %val2 = load i32, ptr %gep2
   br label %exit
 
 exit:
@@ -1096,7 +1096,7 @@ declare i32 @foo();
 
 declare i32 @bar();
 
-define i32 @test_lp(i32 %a) personality i32 (...)* @pers {
+define i32 @test_lp(i32 %a) personality ptr @pers {
 ; CHECK-LABEL: test_lp:
 ; CHECK: %entry
 ; CHECK: %hot
@@ -1122,12 +1122,12 @@ then:
   ret i32 %3
 
 hotlp:
-  %4 = landingpad { i8*, i32 }
+  %4 = landingpad { ptr, i32 }
           cleanup
   br label %lpret
 
 coldlp:
-  %5 = landingpad { i8*, i32 }
+  %5 = landingpad { ptr, i32 }
           cleanup
   br label %lpret
 
@@ -1143,7 +1143,7 @@ lpret:
 ; to the most probable one. See selectBestCandidateBlock as to why.
 declare void @clean();
 
-define void @test_flow_unwind() personality i32 (...)* @pers {
+define void @test_flow_unwind() personality ptr @pers {
 ; CHECK-LABEL: test_flow_unwind:
 ; CHECK: %entry
 ; CHECK: %then
@@ -1163,19 +1163,19 @@ exit:
   ret void
 
 innerlp:
-  %2 = landingpad { i8*, i32 }
+  %2 = landingpad { ptr, i32 }
           cleanup
   br label %innercleanup
 
 outerlp:
-  %3 = landingpad { i8*, i32 }
+  %3 = landingpad { ptr, i32 }
           cleanup
   br label %outercleanup
 
 outercleanup:
-  %4 = phi { i8*, i32 } [%2, %innercleanup], [%3, %outerlp]
+  %4 = phi { ptr, i32 } [%2, %innercleanup], [%3, %outerlp]
   call void @clean()
-  resume { i8*, i32 } %4
+  resume { ptr, i32 } %4
 
 innercleanup:
   call void @clean()
@@ -1184,7 +1184,7 @@ innercleanup:
 
 declare void @hot_function()
 
-define void @test_hot_branch(i32* %a) {
+define void @test_hot_branch(ptr %a) {
 ; Test that a hot branch that has a probability a little larger than 80% will
 ; break CFG constraints when doing block placement.
 ; CHECK-LABEL: test_hot_branch:
@@ -1194,8 +1194,8 @@ define void @test_hot_branch(i32* %a) {
 ; CHECK: %else
 
 entry:
-  %gep1 = getelementptr i32, i32* %a, i32 1
-  %val1 = load i32, i32* %gep1
+  %gep1 = getelementptr i32, ptr %a, i32 1
+  %val1 = load i32, ptr %gep1
   %cond1 = icmp ugt i32 %val1, 1
   br i1 %cond1, label %then, label %else, !prof !5
 
@@ -1212,7 +1212,7 @@ exit:
   ret void
 }
 
-define void @test_hot_branch_profile(i32* %a) !prof !6 {
+define void @test_hot_branch_profile(ptr %a) !prof !6 {
 ; Test that a hot branch that has a probability a little larger than 50% will
 ; break CFG constraints when doing block placement if a profile is available.
 ; CHECK-LABEL: test_hot_branch_profile:
@@ -1222,8 +1222,8 @@ define void @test_hot_branch_profile(i32* %a) !prof !6 {
 ; CHECK: %else
 
 entry:
-  %gep1 = getelementptr i32, i32* %a, i32 1
-  %val1 = load i32, i32* %gep1
+  %gep1 = getelementptr i32, ptr %a, i32 1
+  %val1 = load i32, ptr %gep1
   %cond1 = icmp ugt i32 %val1, 1
   br i1 %cond1, label %then, label %else, !prof !7
 
@@ -1240,7 +1240,7 @@ exit:
   ret void
 }
 
-define void @test_hot_branch_triangle_profile(i32* %a) !prof !6 {
+define void @test_hot_branch_triangle_profile(ptr %a) !prof !6 {
 ; Test that a hot branch that has a probability a little larger than 80% will
 ; break triangle-shaped CFG constraints when doing block placement if profile
 ; is present.
@@ -1250,8 +1250,8 @@ define void @test_hot_branch_triangle_profile(i32* %a) !prof !6 {
 ; CHECK: %then
 
 entry:
-  %gep1 = getelementptr i32, i32* %a, i32 1
-  %val1 = load i32, i32* %gep1
+  %gep1 = getelementptr i32, ptr %a, i32 1
+  %val1 = load i32, ptr %gep1
   %cond1 = icmp ugt i32 %val1, 1
   br i1 %cond1, label %exit, label %then, !prof !5
 
@@ -1264,7 +1264,7 @@ exit:
   ret void
 }
 
-define void @test_hot_branch_triangle_profile_topology(i32* %a) !prof !6 {
+define void @test_hot_branch_triangle_profile_topology(ptr %a) !prof !6 {
 ; Test that a hot branch that has a probability between 50% and 66% will not
 ; break triangle-shaped CFG constraints when doing block placement if profile
 ; is present.
@@ -1274,8 +1274,8 @@ define void @test_hot_branch_triangle_profile_topology(i32* %a) !prof !6 {
 ; CHECK: %exit
 
 entry:
-  %gep1 = getelementptr i32, i32* %a, i32 1
-  %val1 = load i32, i32* %gep1
+  %gep1 = getelementptr i32, ptr %a, i32 1
+  %val1 = load i32, ptr %gep1
   %cond1 = icmp ugt i32 %val1, 1
   br i1 %cond1, label %exit, label %then, !prof !7
 
@@ -1291,7 +1291,7 @@ exit:
 declare void @a()
 declare void @b()
 
-define void @test_forked_hot_diamond(i32* %a) {
+define void @test_forked_hot_diamond(ptr %a) {
 ; Test that a hot-branch with probability > 80% followed by a 50/50 branch
 ; will not place the cold predecessor if the probability for the fallthrough
 ; remains above 80%
@@ -1303,22 +1303,22 @@ define void @test_forked_hot_diamond(i32* %a) {
 ; CHECK: %fork2
 ; CHECK: %exit
 entry:
-  %gep1 = getelementptr i32, i32* %a, i32 1
-  %val1 = load i32, i32* %gep1
+  %gep1 = getelementptr i32, ptr %a, i32 1
+  %val1 = load i32, ptr %gep1
   %cond1 = icmp ugt i32 %val1, 1
   br i1 %cond1, label %then, label %else, !prof !5
 
 then:
   call void @hot_function()
-  %gep2 = getelementptr i32, i32* %a, i32 2
-  %val2 = load i32, i32* %gep2
+  %gep2 = getelementptr i32, ptr %a, i32 2
+  %val2 = load i32, ptr %gep2
   %cond2 = icmp ugt i32 %val2, 2
   br i1 %cond2, label %fork1, label %fork2, !prof !8
 
 else:
   call void @cold_function()
-  %gep3 = getelementptr i32, i32* %a, i32 3
-  %val3 = load i32, i32* %gep3
+  %gep3 = getelementptr i32, ptr %a, i32 3
+  %val3 = load i32, ptr %gep3
   %cond3 = icmp ugt i32 %val3, 3
   br i1 %cond3, label %fork1, label %fork2, !prof !8
 
@@ -1335,7 +1335,7 @@ exit:
   ret void
 }
 
-define void @test_forked_hot_diamond_gets_cold(i32* %a) {
+define void @test_forked_hot_diamond_gets_cold(ptr %a) {
 ; Test that a hot-branch with probability > 80% followed by a 50/50 branch
 ; will place the cold predecessor if the probability for the fallthrough
 ; falls below 80%
@@ -1356,15 +1356,15 @@ define void @test_forked_hot_diamond_gets_cold(i32* %a) {
 ; CHECK: %fork2
 ; CHECK: %exit
 entry:
-  %gep1 = getelementptr i32, i32* %a, i32 1
-  %val1 = load i32, i32* %gep1
+  %gep1 = getelementptr i32, ptr %a, i32 1
+  %val1 = load i32, ptr %gep1
   %cond1 = icmp ugt i32 %val1, 1
   br i1 %cond1, label %then1, label %else1, !prof !9
 
 then1:
   call void @hot_function()
-  %gep2 = getelementptr i32, i32* %a, i32 2
-  %val2 = load i32, i32* %gep2
+  %gep2 = getelementptr i32, ptr %a, i32 2
+  %val2 = load i32, ptr %gep2
   %cond2 = icmp ugt i32 %val2, 2
   br i1 %cond2, label %then2, label %else2, !prof !9
 
@@ -1374,8 +1374,8 @@ else1:
 
 then2:
   call void @hot_function()
-  %gep3 = getelementptr i32, i32* %a, i32 3
-  %val3 = load i32, i32* %gep2
+  %gep3 = getelementptr i32, ptr %a, i32 3
+  %val3 = load i32, ptr %gep2
   %cond3 = icmp ugt i32 %val2, 3
   br i1 %cond3, label %fork1, label %fork2, !prof !8
 
@@ -1396,7 +1396,7 @@ exit:
   ret void
 }
 
-define void @test_forked_hot_diamond_stays_hot(i32* %a) {
+define void @test_forked_hot_diamond_stays_hot(ptr %a) {
 ; Test that a hot-branch with probability > 88.88% (1:8) followed by a 50/50
 ; branch will not place the cold predecessor as the probability for the
 ; fallthrough stays above 80%
@@ -1413,15 +1413,15 @@ define void @test_forked_hot_diamond_stays_hot(i32* %a) {
 ; CHECK: %fork2
 ; CHECK: %exit
 entry:
-  %gep1 = getelementptr i32, i32* %a, i32 1
-  %val1 = load i32, i32* %gep1
+  %gep1 = getelementptr i32, ptr %a, i32 1
+  %val1 = load i32, ptr %gep1
   %cond1 = icmp ugt i32 %val1, 1
   br i1 %cond1, label %then1, label %else1, !prof !10
 
 then1:
   call void @hot_function()
-  %gep2 = getelementptr i32, i32* %a, i32 2
-  %val2 = load i32, i32* %gep2
+  %gep2 = getelementptr i32, ptr %a, i32 2
+  %val2 = load i32, ptr %gep2
   %cond2 = icmp ugt i32 %val2, 2
   br i1 %cond2, label %then2, label %else2, !prof !10
 
@@ -1431,8 +1431,8 @@ else1:
 
 then2:
   call void @hot_function()
-  %gep3 = getelementptr i32, i32* %a, i32 3
-  %val3 = load i32, i32* %gep2
+  %gep3 = getelementptr i32, ptr %a, i32 3
+  %val3 = load i32, ptr %gep2
   %cond3 = icmp ugt i32 %val2, 3
   br i1 %cond3, label %fork1, label %fork2, !prof !8
 

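A note on the !prof thresholds these block-placement tests probe: branch
probabilities come from branch_weights metadata, and the bodies of !5
through !10 sit outside this excerpt. As a generic sketch with made-up
weights (hypothetical function @sketch, not part of the commit):

define void @sketch(i1 %c) {
entry:
  ; Weights of 8:1 give the true edge probability 8/9 = 88.9%, the
  ; 1:8 ratio cited in @test_forked_hot_diamond_stays_hot; the 80%
  ; tests use proportionally smaller weights.
  br i1 %c, label %then, label %else, !prof !0

then:
  ret void

else:
  ret void
}

!0 = !{!"branch_weights", i32 8, i32 1}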
diff --git a/llvm/test/CodeGen/X86/callbr-asm-sink.ll b/llvm/test/CodeGen/X86/callbr-asm-sink.ll
index e8fd9268fb618..a563838cfdf5f 100644
--- a/llvm/test/CodeGen/X86/callbr-asm-sink.ll
+++ b/llvm/test/CodeGen/X86/callbr-asm-sink.ll
@@ -5,9 +5,9 @@
 ;; getelementptr don't get sunk below the callbr. (Reduced from a bug
 ;; report.)
 
-%struct1 = type { i8*, i32 }
+%struct1 = type { ptr, i32 }
 
-define void @klist_dec_and_del(%struct1*) {
+define void @klist_dec_and_del(ptr) {
 ; CHECK-LABEL: klist_dec_and_del:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    leaq 8(%rdi), %rax
@@ -20,14 +20,14 @@ define void @klist_dec_and_del(%struct1*) {
 ; CHECK-NEXT:    # Label of block must be emitted
 ; CHECK-NEXT:    movq $0, -8(%rax)
 ; CHECK-NEXT:    retq
-  %2 = getelementptr inbounds %struct1, %struct1* %0, i64 0, i32 1
-  callbr void asm sideeffect "# $0 $1", "*m,!i,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %2)
+  %2 = getelementptr inbounds %struct1, ptr %0, i64 0, i32 1
+  callbr void asm sideeffect "# $0 $1", "*m,!i,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %2)
           to label %6 [label %3]
 
 3:
-  %4 = getelementptr i32, i32* %2, i64 -2
-  %5 = bitcast i32* %4 to i8**
-  store i8* null, i8** %5, align 8
+  %4 = getelementptr i32, ptr %2, i64 -2
+  %5 = bitcast ptr %4 to ptr
+  store ptr null, ptr %5, align 8
   br label %6
 
 6:

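The bitcast in block 3 above survives only as an identity cast: once all
pointers share the single ptr type, pointer-to-pointer bitcasts are no-ops
but still legal, and this conversion keeps them so SSA value names (and
the FileCheck lines that mention them) stay stable. A minimal sketch of
the pattern, with hypothetical names:

define void @sketch(ptr %q) {
  ; With typed pointers this was: %p = bitcast i32* %q to i8**
  ; With opaque pointers it degenerates to an identity cast.
  %p = bitcast ptr %q to ptr
  store ptr null, ptr %p
  ret void
}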
diff --git a/llvm/test/CodeGen/X86/cmp.ll b/llvm/test/CodeGen/X86/cmp.ll
index 89879c7f43364..cd1953bec774d 100644
--- a/llvm/test/CodeGen/X86/cmp.ll
+++ b/llvm/test/CodeGen/X86/cmp.ll
@@ -1162,7 +1162,7 @@ declare i32 @f()
 ; Make sure we fold the load+and into a test from memory.
 ; The store makes sure the chain result of the load is used, which used to
 ; prevent the post isel peephole from catching this.
-define i1 @fold_test_and_with_chain(i32* %x, i32* %y, i32 %z) {
+define i1 @fold_test_and_with_chain(ptr %x, ptr %y, i32 %z) {
 ; CHECK-LABEL: fold_test_and_with_chain:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    testl %edx, (%rdi) # encoding: [0x85,0x17]
@@ -1176,9 +1176,9 @@ define i1 @fold_test_and_with_chain(i32* %x, i32* %y, i32 %z) {
 ; NDD-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
 ; NDD-NEXT:    movl %edx, (%rsi) # encoding: [0x89,0x16]
 ; NDD-NEXT:    retq # encoding: [0xc3]
-  %a = load i32, i32* %x
+  %a = load i32, ptr %x
   %b = and i32 %z, %a
   %c = icmp eq i32 %b, 0
-  store i32 %z, i32* %y
+  store i32 %z, ptr %y
   ret i1 %c
 }

diff --git a/llvm/test/CodeGen/X86/code-model-kernel.ll b/llvm/test/CodeGen/X86/code-model-kernel.ll
index fa5c0e47e41eb..53b832374b1f9 100644
--- a/llvm/test/CodeGen/X86/code-model-kernel.ll
+++ b/llvm/test/CodeGen/X86/code-model-kernel.ll
@@ -7,42 +7,42 @@
 ; CHECK: .byte	0                       # @TType Encoding = absptr
 ; CHECK: .quad	_ZTIi
 
-@_ZTIi = external constant i8*
+@_ZTIi = external constant ptr
 
 ; Function Attrs: noinline norecurse optnone uwtable
-define i32 @main() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @main() #0 personality ptr @__gxx_personality_v0 {
   %1 = alloca i32, align 4
-  %2 = alloca i8*
+  %2 = alloca ptr
   %3 = alloca i32
   %4 = alloca i32, align 4
-  store i32 0, i32* %1, align 4
-  %5 = call i8* @__cxa_allocate_exception(i64 4) #2
-  %6 = bitcast i8* %5 to i32*
-  store i32 20, i32* %6, align 16
-  invoke void @__cxa_throw(i8* %5, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #3
+  store i32 0, ptr %1, align 4
+  %5 = call ptr @__cxa_allocate_exception(i64 4) #2
+  %6 = bitcast ptr %5 to ptr
+  store i32 20, ptr %6, align 16
+  invoke void @__cxa_throw(ptr %5, ptr @_ZTIi, ptr null) #3
           to label %26 unwind label %7
 
 ; <label>:7:                                      ; preds = %0
-  %8 = landingpad { i8*, i32 }
-          catch i8* bitcast (i8** @_ZTIi to i8*)
-  %9 = extractvalue { i8*, i32 } %8, 0
-  store i8* %9, i8** %2, align 8
-  %10 = extractvalue { i8*, i32 } %8, 1
-  store i32 %10, i32* %3, align 4
+  %8 = landingpad { ptr, i32 }
+          catch ptr @_ZTIi
+  %9 = extractvalue { ptr, i32 } %8, 0
+  store ptr %9, ptr %2, align 8
+  %10 = extractvalue { ptr, i32 } %8, 1
+  store i32 %10, ptr %3, align 4
   br label %11
 
 ; <label>:11:                                     ; preds = %7
-  %12 = load i32, i32* %3, align 4
-  %13 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #2
+  %12 = load i32, ptr %3, align 4
+  %13 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #2
   %14 = icmp eq i32 %12, %13
   br i1 %14, label %15, label %21
 
 ; <label>:15:                                     ; preds = %11
-  %16 = load i8*, i8** %2, align 8
-  %17 = call i8* @__cxa_begin_catch(i8* %16) #2
-  %18 = bitcast i8* %17 to i32*
-  %19 = load i32, i32* %18, align 4
-  store i32 %19, i32* %4, align 4
+  %16 = load ptr, ptr %2, align 8
+  %17 = call ptr @__cxa_begin_catch(ptr %16) #2
+  %18 = bitcast ptr %17 to ptr
+  %19 = load i32, ptr %18, align 4
+  store i32 %19, ptr %4, align 4
   call void @__cxa_end_catch() #2
   br label %20
 
@@ -50,26 +50,26 @@ define i32 @main() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0
   ret i32 0
 
 ; <label>:21:                                     ; preds = %11
-  %22 = load i8*, i8** %2, align 8
-  %23 = load i32, i32* %3, align 4
-  %24 = insertvalue { i8*, i32 } undef, i8* %22, 0
-  %25 = insertvalue { i8*, i32 } %24, i32 %23, 1
-  resume { i8*, i32 } %25
+  %22 = load ptr, ptr %2, align 8
+  %23 = load i32, ptr %3, align 4
+  %24 = insertvalue { ptr, i32 } undef, ptr %22, 0
+  %25 = insertvalue { ptr, i32 } %24, i32 %23, 1
+  resume { ptr, i32 } %25
 
 ; <label>:26:                                     ; preds = %0
   unreachable
 }
 
-declare i8* @__cxa_allocate_exception(i64)
+declare ptr @__cxa_allocate_exception(i64)
 
-declare void @__cxa_throw(i8*, i8*, i8*)
+declare void @__cxa_throw(ptr, ptr, ptr)
 
 declare i32 @__gxx_personality_v0(...)
 
 ; Function Attrs: nounwind readnone
-declare i32 @llvm.eh.typeid.for(i8*) #1
+declare i32 @llvm.eh.typeid.for(ptr) #1
 
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 
 declare void @__cxa_end_catch()
 

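The personality and catch-clause rewrites in code-model-kernel.ll show the
constant-expression side of the change: casts such as
bitcast (i8** @_ZTIi to i8*) involve only pointer types, so they fold away
and the globals are referenced directly. A minimal sketch, assuming the
usual Itanium C++ personality routine:

@_ZTIi = external constant ptr

declare i32 @__gxx_personality_v0(...)

define void @sketch() personality ptr @__gxx_personality_v0 {
  ; No constant-expression cast is needed to name either global now.
  ret void
}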
diff --git a/llvm/test/CodeGen/X86/code_placement.ll b/llvm/test/CodeGen/X86/code_placement.ll
index b0376c7704a20..6293dac1fcb06 100644
--- a/llvm/test/CodeGen/X86/code_placement.ll
+++ b/llvm/test/CodeGen/X86/code_placement.ll
@@ -9,11 +9,11 @@
 ; CHECK: %bb1
 ; CHECK: %bb2
 
-define void @t(i8* nocapture %in, i8* nocapture %out, i32* nocapture %rk, i32 %r) nounwind ssp {
+define void @t(ptr nocapture %in, ptr nocapture %out, ptr nocapture %rk, i32 %r) nounwind ssp {
 entry:
-	%0 = load i32, i32* %rk, align 4		; <i32> [#uses=1]
-	%1 = getelementptr i32, i32* %rk, i64 1		; <i32*> [#uses=1]
-	%2 = load i32, i32* %1, align 4		; <i32> [#uses=1]
+	%0 = load i32, ptr %rk, align 4		; <i32> [#uses=1]
+	%1 = getelementptr i32, ptr %rk, i64 1		; <i32*> [#uses=1]
+	%2 = load i32, ptr %1, align 4		; <i32> [#uses=1]
 	%tmp15 = add i32 %r, -1		; <i32> [#uses=1]
 	%tmp.16 = zext i32 %tmp15 to i64		; <i64> [#uses=2]
 	br label %bb
@@ -23,67 +23,67 @@ bb:		; preds = %bb1, %entry
 	%s1.0 = phi i32 [ %2, %entry ], [ %56, %bb1 ]		; <i32> [#uses=2]
 	%s0.0 = phi i32 [ %0, %entry ], [ %43, %bb1 ]		; <i32> [#uses=2]
 	%tmp18 = shl i64 %indvar, 4		; <i64> [#uses=4]
-	%rk26 = bitcast i32* %rk to i8*		; <i8*> [#uses=6]
+	%rk26 = bitcast ptr %rk to ptr		; <i8*> [#uses=6]
 	%3 = lshr i32 %s0.0, 24		; <i32> [#uses=1]
 	%4 = zext i32 %3 to i64		; <i64> [#uses=1]
-	%5 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %4		; <i32*> [#uses=1]
-	%6 = load i32, i32* %5, align 4		; <i32> [#uses=1]
+	%5 = getelementptr [256 x i32], ptr @Te0, i64 0, i64 %4		; <i32*> [#uses=1]
+	%6 = load i32, ptr %5, align 4		; <i32> [#uses=1]
 	%7 = lshr i32 %s1.0, 16		; <i32> [#uses=1]
 	%8 = and i32 %7, 255		; <i32> [#uses=1]
 	%9 = zext i32 %8 to i64		; <i64> [#uses=1]
-	%10 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %9		; <i32*> [#uses=1]
-	%11 = load i32, i32* %10, align 4		; <i32> [#uses=1]
+	%10 = getelementptr [256 x i32], ptr @Te1, i64 0, i64 %9		; <i32*> [#uses=1]
+	%11 = load i32, ptr %10, align 4		; <i32> [#uses=1]
 	%ctg2.sum2728 = or i64 %tmp18, 8		; <i64> [#uses=1]
-	%12 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2728		; <i8*> [#uses=1]
-	%13 = bitcast i8* %12 to i32*		; <i32*> [#uses=1]
-	%14 = load i32, i32* %13, align 4		; <i32> [#uses=1]
+	%12 = getelementptr i8, ptr %rk26, i64 %ctg2.sum2728		; <i8*> [#uses=1]
+	%13 = bitcast ptr %12 to ptr		; <i32*> [#uses=1]
+	%14 = load i32, ptr %13, align 4		; <i32> [#uses=1]
 	%15 = xor i32 %11, %6		; <i32> [#uses=1]
 	%16 = xor i32 %15, %14		; <i32> [#uses=3]
 	%17 = lshr i32 %s1.0, 24		; <i32> [#uses=1]
 	%18 = zext i32 %17 to i64		; <i64> [#uses=1]
-	%19 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %18		; <i32*> [#uses=1]
-	%20 = load i32, i32* %19, align 4		; <i32> [#uses=1]
+	%19 = getelementptr [256 x i32], ptr @Te0, i64 0, i64 %18		; <i32*> [#uses=1]
+	%20 = load i32, ptr %19, align 4		; <i32> [#uses=1]
 	%21 = and i32 %s0.0, 255		; <i32> [#uses=1]
 	%22 = zext i32 %21 to i64		; <i64> [#uses=1]
-	%23 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %22		; <i32*> [#uses=1]
-	%24 = load i32, i32* %23, align 4		; <i32> [#uses=1]
+	%23 = getelementptr [256 x i32], ptr @Te3, i64 0, i64 %22		; <i32*> [#uses=1]
+	%24 = load i32, ptr %23, align 4		; <i32> [#uses=1]
 	%ctg2.sum2930 = or i64 %tmp18, 12		; <i64> [#uses=1]
-	%25 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2930		; <i8*> [#uses=1]
-	%26 = bitcast i8* %25 to i32*		; <i32*> [#uses=1]
-	%27 = load i32, i32* %26, align 4		; <i32> [#uses=1]
+	%25 = getelementptr i8, ptr %rk26, i64 %ctg2.sum2930		; <i8*> [#uses=1]
+	%26 = bitcast ptr %25 to ptr		; <i32*> [#uses=1]
+	%27 = load i32, ptr %26, align 4		; <i32> [#uses=1]
 	%28 = xor i32 %24, %20		; <i32> [#uses=1]
 	%29 = xor i32 %28, %27		; <i32> [#uses=4]
 	%30 = lshr i32 %16, 24		; <i32> [#uses=1]
 	%31 = zext i32 %30 to i64		; <i64> [#uses=1]
-	%32 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %31		; <i32*> [#uses=1]
-	%33 = load i32, i32* %32, align 4		; <i32> [#uses=2]
+	%32 = getelementptr [256 x i32], ptr @Te0, i64 0, i64 %31		; <i32*> [#uses=1]
+	%33 = load i32, ptr %32, align 4		; <i32> [#uses=2]
 	%exitcond = icmp eq i64 %indvar, %tmp.16		; <i1> [#uses=1]
 	br i1 %exitcond, label %bb2, label %bb1
 
 bb1:		; preds = %bb
 	%ctg2.sum31 = add i64 %tmp18, 16		; <i64> [#uses=1]
-	%34 = getelementptr i8, i8* %rk26, i64 %ctg2.sum31		; <i8*> [#uses=1]
-	%35 = bitcast i8* %34 to i32*		; <i32*> [#uses=1]
+	%34 = getelementptr i8, ptr %rk26, i64 %ctg2.sum31		; <i8*> [#uses=1]
+	%35 = bitcast ptr %34 to ptr		; <i32*> [#uses=1]
 	%36 = lshr i32 %29, 16		; <i32> [#uses=1]
 	%37 = and i32 %36, 255		; <i32> [#uses=1]
 	%38 = zext i32 %37 to i64		; <i64> [#uses=1]
-	%39 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %38		; <i32*> [#uses=1]
-	%40 = load i32, i32* %39, align 4		; <i32> [#uses=1]
-	%41 = load i32, i32* %35, align 4		; <i32> [#uses=1]
+	%39 = getelementptr [256 x i32], ptr @Te1, i64 0, i64 %38		; <i32*> [#uses=1]
+	%40 = load i32, ptr %39, align 4		; <i32> [#uses=1]
+	%41 = load i32, ptr %35, align 4		; <i32> [#uses=1]
 	%42 = xor i32 %40, %33		; <i32> [#uses=1]
 	%43 = xor i32 %42, %41		; <i32> [#uses=1]
 	%44 = lshr i32 %29, 24		; <i32> [#uses=1]
 	%45 = zext i32 %44 to i64		; <i64> [#uses=1]
-	%46 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %45		; <i32*> [#uses=1]
-	%47 = load i32, i32* %46, align 4		; <i32> [#uses=1]
+	%46 = getelementptr [256 x i32], ptr @Te0, i64 0, i64 %45		; <i32*> [#uses=1]
+	%47 = load i32, ptr %46, align 4		; <i32> [#uses=1]
 	%48 = and i32 %16, 255		; <i32> [#uses=1]
 	%49 = zext i32 %48 to i64		; <i64> [#uses=1]
-	%50 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %49		; <i32*> [#uses=1]
-	%51 = load i32, i32* %50, align 4		; <i32> [#uses=1]
+	%50 = getelementptr [256 x i32], ptr @Te3, i64 0, i64 %49		; <i32*> [#uses=1]
+	%51 = load i32, ptr %50, align 4		; <i32> [#uses=1]
 	%ctg2.sum32 = add i64 %tmp18, 20		; <i64> [#uses=1]
-	%52 = getelementptr i8, i8* %rk26, i64 %ctg2.sum32		; <i8*> [#uses=1]
-	%53 = bitcast i8* %52 to i32*		; <i32*> [#uses=1]
-	%54 = load i32, i32* %53, align 4		; <i32> [#uses=1]
+	%52 = getelementptr i8, ptr %rk26, i64 %ctg2.sum32		; <i8*> [#uses=1]
+	%53 = bitcast ptr %52 to ptr		; <i32*> [#uses=1]
+	%54 = load i32, ptr %53, align 4		; <i32> [#uses=1]
 	%55 = xor i32 %51, %47		; <i32> [#uses=1]
 	%56 = xor i32 %55, %54		; <i32> [#uses=1]
 	%indvar.next = add i64 %indvar, 1		; <i64> [#uses=1]
@@ -92,48 +92,48 @@ bb1:		; preds = %bb
 bb2:		; preds = %bb
 	%tmp10 = shl i64 %tmp.16, 4		; <i64> [#uses=2]
 	%ctg2.sum = add i64 %tmp10, 16		; <i64> [#uses=1]
-	%tmp1213 = getelementptr i8, i8* %rk26, i64 %ctg2.sum		; <i8*> [#uses=1]
-	%57 = bitcast i8* %tmp1213 to i32*		; <i32*> [#uses=1]
+	%tmp1213 = getelementptr i8, ptr %rk26, i64 %ctg2.sum		; <i8*> [#uses=1]
+	%57 = bitcast ptr %tmp1213 to ptr		; <i32*> [#uses=1]
 	%58 = and i32 %33, -16777216		; <i32> [#uses=1]
 	%59 = lshr i32 %29, 16		; <i32> [#uses=1]
 	%60 = and i32 %59, 255		; <i32> [#uses=1]
 	%61 = zext i32 %60 to i64		; <i64> [#uses=1]
-	%62 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %61		; <i32*> [#uses=1]
-	%63 = load i32, i32* %62, align 4		; <i32> [#uses=1]
+	%62 = getelementptr [256 x i32], ptr @Te1, i64 0, i64 %61		; <i32*> [#uses=1]
+	%63 = load i32, ptr %62, align 4		; <i32> [#uses=1]
 	%64 = and i32 %63, 16711680		; <i32> [#uses=1]
 	%65 = or i32 %64, %58		; <i32> [#uses=1]
-	%66 = load i32, i32* %57, align 4		; <i32> [#uses=1]
+	%66 = load i32, ptr %57, align 4		; <i32> [#uses=1]
 	%67 = xor i32 %65, %66		; <i32> [#uses=2]
 	%68 = lshr i32 %29, 8		; <i32> [#uses=1]
 	%69 = zext i32 %68 to i64		; <i64> [#uses=1]
-	%70 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %69		; <i32*> [#uses=1]
-	%71 = load i32, i32* %70, align 4		; <i32> [#uses=1]
+	%70 = getelementptr [256 x i32], ptr @Te0, i64 0, i64 %69		; <i32*> [#uses=1]
+	%71 = load i32, ptr %70, align 4		; <i32> [#uses=1]
 	%72 = and i32 %71, -16777216		; <i32> [#uses=1]
 	%73 = and i32 %16, 255		; <i32> [#uses=1]
 	%74 = zext i32 %73 to i64		; <i64> [#uses=1]
-	%75 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %74		; <i32*> [#uses=1]
-	%76 = load i32, i32* %75, align 4		; <i32> [#uses=1]
+	%75 = getelementptr [256 x i32], ptr @Te1, i64 0, i64 %74		; <i32*> [#uses=1]
+	%76 = load i32, ptr %75, align 4		; <i32> [#uses=1]
 	%77 = and i32 %76, 16711680		; <i32> [#uses=1]
 	%78 = or i32 %77, %72		; <i32> [#uses=1]
 	%ctg2.sum25 = add i64 %tmp10, 20		; <i64> [#uses=1]
-	%79 = getelementptr i8, i8* %rk26, i64 %ctg2.sum25		; <i8*> [#uses=1]
-	%80 = bitcast i8* %79 to i32*		; <i32*> [#uses=1]
-	%81 = load i32, i32* %80, align 4		; <i32> [#uses=1]
+	%79 = getelementptr i8, ptr %rk26, i64 %ctg2.sum25		; <i8*> [#uses=1]
+	%80 = bitcast ptr %79 to ptr		; <i32*> [#uses=1]
+	%81 = load i32, ptr %80, align 4		; <i32> [#uses=1]
 	%82 = xor i32 %78, %81		; <i32> [#uses=2]
 	%83 = lshr i32 %67, 24		; <i32> [#uses=1]
 	%84 = trunc i32 %83 to i8		; <i8> [#uses=1]
-	store i8 %84, i8* %out, align 1
+	store i8 %84, ptr %out, align 1
 	%85 = lshr i32 %67, 16		; <i32> [#uses=1]
 	%86 = trunc i32 %85 to i8		; <i8> [#uses=1]
-	%87 = getelementptr i8, i8* %out, i64 1		; <i8*> [#uses=1]
-	store i8 %86, i8* %87, align 1
-	%88 = getelementptr i8, i8* %out, i64 4		; <i8*> [#uses=1]
+	%87 = getelementptr i8, ptr %out, i64 1		; <i8*> [#uses=1]
+	store i8 %86, ptr %87, align 1
+	%88 = getelementptr i8, ptr %out, i64 4		; <i8*> [#uses=1]
 	%89 = lshr i32 %82, 24		; <i32> [#uses=1]
 	%90 = trunc i32 %89 to i8		; <i8> [#uses=1]
-	store i8 %90, i8* %88, align 1
+	store i8 %90, ptr %88, align 1
 	%91 = lshr i32 %82, 16		; <i32> [#uses=1]
 	%92 = trunc i32 %91 to i8		; <i8> [#uses=1]
-	%93 = getelementptr i8, i8* %out, i64 5		; <i8*> [#uses=1]
-	store i8 %92, i8* %93, align 1
+	%93 = getelementptr i8, ptr %out, i64 5		; <i8*> [#uses=1]
+	store i8 %92, ptr %93, align 1
 	ret void
 }

diff --git a/llvm/test/CodeGen/X86/complex-asm.ll b/llvm/test/CodeGen/X86/complex-asm.ll
index ba5e719d68ae6..2d4612bce14d0 100644
--- a/llvm/test/CodeGen/X86/complex-asm.ll
+++ b/llvm/test/CodeGen/X86/complex-asm.ll
@@ -6,11 +6,11 @@
 define %0 @f() nounwind ssp {
 entry:
   %v = alloca %0, align 8
-  call void asm sideeffect "", "=*r,r,r,0,~{dirflag},~{fpsr},~{flags}"(%0* elementtype(%0) %v, i32 0, i32 1, i128 undef) nounwind
-  %0 = getelementptr inbounds %0, %0* %v, i64 0, i32 0
-  %1 = load i64, i64* %0, align 8
-  %2 = getelementptr inbounds %0, %0* %v, i64 0, i32 1
-  %3 = load i64, i64* %2, align 8
+  call void asm sideeffect "", "=*r,r,r,0,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(%0) %v, i32 0, i32 1, i128 undef) nounwind
+  %0 = getelementptr inbounds %0, ptr %v, i64 0, i32 0
+  %1 = load i64, ptr %0, align 8
+  %2 = getelementptr inbounds %0, ptr %v, i64 0, i32 1
+  %3 = load i64, ptr %2, align 8
   %mrv4 = insertvalue %0 undef, i64 %1, 0
   %mrv5 = insertvalue %0 %mrv4, i64 %3, 1
   ret %0 %mrv5

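Note that the asm operand in complex-asm.ll keeps its elementtype(%0)
attribute through the conversion: indirect inline-asm (and callbr)
operands need a pointee type that an opaque ptr no longer carries, so the
attribute supplies it. A minimal sketch with a hypothetical struct type:

%pair = type { i64, i64 }

define void @sketch(ptr %p) {
  ; "*m" is an indirect constraint, so the operand must carry an
  ; elementtype attribute describing the in-memory type it references.
  call void asm sideeffect "", "*m,~{memory}"(ptr elementtype(%pair) %p)
  ret void
}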
diff --git a/llvm/test/CodeGen/X86/crash.ll b/llvm/test/CodeGen/X86/crash.ll
index d9e5097349c33..16e3bb6e50aee 100644
--- a/llvm/test/CodeGen/X86/crash.ll
+++ b/llvm/test/CodeGen/X86/crash.ll
@@ -438,7 +438,7 @@ entry:
   %conv = uitofp i64 %sub to float
   %div = fmul float %conv, 5.000000e-01
   %conv2 = fpext float %div to double
-  tail call void (...) @_Z6PrintFz(ptr getelementptr inbounds ({ [1 x i8], [63 x i8] }, ptr @.str, i64 0, i32 0, i64 0), double %conv2)
+  tail call void (...) @_Z6PrintFz(ptr @.str, double %conv2)
   ret void
 }
 declare void @_Z6PrintFz(...)

diff --git a/llvm/test/CodeGen/X86/fastisel-memset-flush.ll b/llvm/test/CodeGen/X86/fastisel-memset-flush.ll
index f74215dbcc7d4..06feaad13b44b 100644
--- a/llvm/test/CodeGen/X86/fastisel-memset-flush.ll
+++ b/llvm/test/CodeGen/X86/fastisel-memset-flush.ll
@@ -3,11 +3,11 @@
 define dso_local void @foo() !dbg !7 {
 entry:
   %a = alloca i32, align 4
-  store i32 0, i32* %a, align 4, !dbg !9
-  %0 = bitcast i32* %a to i8*, !dbg !10
-  call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 -86, i64 4, i1 false), !dbg !10
-  %1 = bitcast i32* %a to i8*, !dbg !11
-  call void @other(i8* %1), !dbg !12
+  store i32 0, ptr %a, align 4, !dbg !9
+  %0 = bitcast ptr %a to ptr, !dbg !10
+  call void @llvm.memset.p0.i64(ptr align 4 %0, i8 -86, i64 4, i1 false), !dbg !10
+  %1 = bitcast ptr %a to ptr, !dbg !11
+  call void @other(ptr %1), !dbg !12
   ret void, !dbg !13
 }
 ; CHECK:      callq memset
@@ -16,9 +16,9 @@ entry:
 ; CHECK-NEXT: .loc 1 9 3
 ; CHECK-NEXT: callq other
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
 
-declare dso_local void @other(i8*)
+declare dso_local void @other(ptr)
 
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!3, !4, !5}

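The memset rename in fastisel-memset-flush.ll is the intrinsic-mangling
side of the change: overloaded intrinsics encode their pointer parameter
types in the name suffix, and p0i8 (i8* in address space 0) collapses to
p0 once pointee types are gone. A minimal usage sketch (hypothetical
@zero16, not part of the commit):

declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)

define void @zero16(ptr %dst) {
  ; The .p0 suffix now names only the pointer's address space.
  call void @llvm.memset.p0.i64(ptr align 8 %dst, i8 0, i64 16, i1 false)
  ret void
}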
diff --git a/llvm/test/CodeGen/X86/function-alias.ll b/llvm/test/CodeGen/X86/function-alias.ll
index ddcffa6722ce2..9191c0bc27f6e 100644
--- a/llvm/test/CodeGen/X86/function-alias.ll
+++ b/llvm/test/CodeGen/X86/function-alias.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-unknown-linux-gnu"
 @0 = private constant <{ i8, i8 }> <{i8 15, i8 11}>, section ".text"
 
 ; function-typed alias
-@ud2 = alias void (), ptr bitcast (<{ i8, i8 }>* @0 to ptr)
+@ud2 = alias void (), ptr @0
 
 ; Check that "ud2" is emitted as a function symbol.
 ; CHECK: .type{{.*}}ud2, at function

diff --git a/llvm/test/CodeGen/X86/funnel-shift.ll b/llvm/test/CodeGen/X86/funnel-shift.ll
index 4123890ed1a76..c6f0662cadd6b 100644
--- a/llvm/test/CodeGen/X86/funnel-shift.ll
+++ b/llvm/test/CodeGen/X86/funnel-shift.ll
@@ -973,7 +973,7 @@ define <4 x i32> @fshr_v4i32_shift_by_bitwidth(<4 x i32> %x, <4 x i32> %y) nounw
 }
 
 %struct.S = type { [11 x i8], i8 }
-define void @PR45265(i32 %0, %struct.S* nocapture readonly %1) nounwind {
+define void @PR45265(i32 %0, ptr nocapture readonly %1) nounwind {
 ; X86-SSE2-LABEL: PR45265:
 ; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    pushl %edi
@@ -1021,9 +1021,9 @@ define void @PR45265(i32 %0, %struct.S* nocapture readonly %1) nounwind {
 ; X64-AVX2-NEXT:  # %bb.1:
 ; X64-AVX2-NEXT:    retq
   %3 = sext i32 %0 to i64
-  %4 = getelementptr inbounds %struct.S, %struct.S* %1, i64 %3
-  %5 = bitcast %struct.S* %4 to i88*
-  %6 = load i88, i88* %5, align 1
+  %4 = getelementptr inbounds %struct.S, ptr %1, i64 %3
+  %5 = bitcast ptr %4 to ptr
+  %6 = load i88, ptr %5, align 1
   %7 = ashr i88 %6, 40
   %8 = trunc i88 %7 to i64
   %9 = icmp eq i64 %8, %3

diff --git a/llvm/test/CodeGen/X86/large-constants-x32.ll b/llvm/test/CodeGen/X86/large-constants-x32.ll
index 95a75006e085a..5fe641aac1415 100644
--- a/llvm/test/CodeGen/X86/large-constants-x32.ll
+++ b/llvm/test/CodeGen/X86/large-constants-x32.ll
@@ -16,14 +16,14 @@ define void @constant_expressions() {
 ; CHECK-NEXT:    movl %edx, (%eax)
 ; CHECK-NEXT:    retq
 entry:
-  %0 = load i32, i32* inttoptr (i32 add (i32 -289477652, i32 0) to i32*)
-  %1 = load i32, i32* inttoptr (i32 add (i32 -289477652, i32 4) to i32*)
-  %2 = load i32, i32* inttoptr (i32 add (i32 -289477652, i32 8) to i32*)
-  %3 = load i32, i32* inttoptr (i32 add (i32 -289477652, i32 16) to i32*)
+  %0 = load i32, ptr inttoptr (i32 add (i32 -289477652, i32 0) to ptr)
+  %1 = load i32, ptr inttoptr (i32 add (i32 -289477652, i32 4) to ptr)
+  %2 = load i32, ptr inttoptr (i32 add (i32 -289477652, i32 8) to ptr)
+  %3 = load i32, ptr inttoptr (i32 add (i32 -289477652, i32 16) to ptr)
   %4 = add i32 %0, %1
   %5 = add i32 %2, %3
   %6 = add i32 %4, %5
-  store i32 %6, i32* inttoptr (i32 add (i32 -289477652, i32 0) to i32*)
+  store i32 %6, ptr inttoptr (i32 add (i32 -289477652, i32 0) to ptr)
   ret void
 }
 
@@ -43,13 +43,13 @@ define void @constant_expressions2() {
 ; CHECK-NEXT:    movl %edx, (%eax)
 ; CHECK-NEXT:    retq
 entry:
-  %0 = load i32, i32* inttoptr (i32 -289477652 to i32*)
-  %1 = load i32, i32* inttoptr (i32 -289477648 to i32*)
-  %2 = load i32, i32* inttoptr (i32 -289477644 to i32*)
-  %3 = load i32, i32* inttoptr (i32 -289477640 to i32*)
+  %0 = load i32, ptr inttoptr (i32 -289477652 to ptr)
+  %1 = load i32, ptr inttoptr (i32 -289477648 to ptr)
+  %2 = load i32, ptr inttoptr (i32 -289477644 to ptr)
+  %3 = load i32, ptr inttoptr (i32 -289477640 to ptr)
   %4 = add i32 %0, %1
   %5 = add i32 %2, %3
   %6 = add i32 %4, %5
-  store i32 %6, i32* inttoptr (i32 -289477652 to i32*)
+  store i32 %6, ptr inttoptr (i32 -289477652 to ptr)
   ret void
 }

diff --git a/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll b/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
index 85449b01265bf..14dfc046c029a 100644
--- a/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
+++ b/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
@@ -6,7 +6,7 @@
 @Te1 = external global [256 x i32]		; <[256 x i32]*> [#uses=4]
 @Te3 = external global [256 x i32]		; <[256 x i32]*> [#uses=2]
 
-define void @t(i8* nocapture %in, i8* nocapture %out, i32* nocapture %rk, i32 %r) nounwind {
+define void @t(ptr nocapture %in, ptr nocapture %out, ptr nocapture %rk, i32 %r) nounwind {
 ; GENERIC-LABEL: t:
 ; GENERIC:       ## %bb.0: ## %entry
 ; GENERIC-NEXT:    pushq %rbp
@@ -173,9 +173,9 @@ define void @t(i8* nocapture %in, i8* nocapture %out, i32* nocapture %rk, i32 %r
 ; ATOM-NEXT:    popq %r15
 ; ATOM-NEXT:    retq
 entry:
-	%0 = load i32, i32* %rk, align 4		; <i32> [#uses=1]
-	%1 = getelementptr i32, i32* %rk, i64 1		; <i32*> [#uses=1]
-	%2 = load i32, i32* %1, align 4		; <i32> [#uses=1]
+	%0 = load i32, ptr %rk, align 4		; <i32> [#uses=1]
+	%1 = getelementptr i32, ptr %rk, i64 1		; <i32*> [#uses=1]
+	%2 = load i32, ptr %1, align 4		; <i32> [#uses=1]
 	%tmp15 = add i32 %r, -1		; <i32> [#uses=1]
 	%tmp.16 = zext i32 %tmp15 to i64		; <i64> [#uses=2]
 	br label %bb
@@ -185,67 +185,67 @@ bb:		; preds = %bb1, %entry
 	%s1.0 = phi i32 [ %2, %entry ], [ %56, %bb1 ]		; <i32> [#uses=2]
 	%s0.0 = phi i32 [ %0, %entry ], [ %43, %bb1 ]		; <i32> [#uses=2]
 	%tmp18 = shl i64 %indvar, 4		; <i64> [#uses=4]
-	%rk26 = bitcast i32* %rk to i8*		; <i8*> [#uses=6]
+	%rk26 = bitcast ptr %rk to ptr		; <i8*> [#uses=6]
 	%3 = lshr i32 %s0.0, 24		; <i32> [#uses=1]
 	%4 = zext i32 %3 to i64		; <i64> [#uses=1]
-	%5 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %4		; <i32*> [#uses=1]
-	%6 = load i32, i32* %5, align 4		; <i32> [#uses=1]
+	%5 = getelementptr [256 x i32], ptr @Te0, i64 0, i64 %4		; <i32*> [#uses=1]
+	%6 = load i32, ptr %5, align 4		; <i32> [#uses=1]
 	%7 = lshr i32 %s1.0, 16		; <i32> [#uses=1]
 	%8 = and i32 %7, 255		; <i32> [#uses=1]
 	%9 = zext i32 %8 to i64		; <i64> [#uses=1]
-	%10 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %9		; <i32*> [#uses=1]
-	%11 = load i32, i32* %10, align 4		; <i32> [#uses=1]
+	%10 = getelementptr [256 x i32], ptr @Te1, i64 0, i64 %9		; <i32*> [#uses=1]
+	%11 = load i32, ptr %10, align 4		; <i32> [#uses=1]
 	%ctg2.sum2728 = or disjoint i64 %tmp18, 8		; <i64> [#uses=1]
-	%12 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2728		; <i8*> [#uses=1]
-	%13 = bitcast i8* %12 to i32*		; <i32*> [#uses=1]
-	%14 = load i32, i32* %13, align 4		; <i32> [#uses=1]
+	%12 = getelementptr i8, ptr %rk26, i64 %ctg2.sum2728		; <i8*> [#uses=1]
+	%13 = bitcast ptr %12 to ptr		; <i32*> [#uses=1]
+	%14 = load i32, ptr %13, align 4		; <i32> [#uses=1]
 	%15 = xor i32 %11, %6		; <i32> [#uses=1]
 	%16 = xor i32 %15, %14		; <i32> [#uses=3]
 	%17 = lshr i32 %s1.0, 24		; <i32> [#uses=1]
 	%18 = zext i32 %17 to i64		; <i64> [#uses=1]
-	%19 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %18		; <i32*> [#uses=1]
-	%20 = load i32, i32* %19, align 4		; <i32> [#uses=1]
+	%19 = getelementptr [256 x i32], ptr @Te0, i64 0, i64 %18		; <i32*> [#uses=1]
+	%20 = load i32, ptr %19, align 4		; <i32> [#uses=1]
 	%21 = and i32 %s0.0, 255		; <i32> [#uses=1]
 	%22 = zext i32 %21 to i64		; <i64> [#uses=1]
-	%23 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %22		; <i32*> [#uses=1]
-	%24 = load i32, i32* %23, align 4		; <i32> [#uses=1]
+	%23 = getelementptr [256 x i32], ptr @Te3, i64 0, i64 %22		; <i32*> [#uses=1]
+	%24 = load i32, ptr %23, align 4		; <i32> [#uses=1]
 	%ctg2.sum2930 = or disjoint i64 %tmp18, 12		; <i64> [#uses=1]
-	%25 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2930		; <i8*> [#uses=1]
-	%26 = bitcast i8* %25 to i32*		; <i32*> [#uses=1]
-	%27 = load i32, i32* %26, align 4		; <i32> [#uses=1]
+	%25 = getelementptr i8, ptr %rk26, i64 %ctg2.sum2930		; <i8*> [#uses=1]
+	%26 = bitcast ptr %25 to ptr		; <i32*> [#uses=1]
+	%27 = load i32, ptr %26, align 4		; <i32> [#uses=1]
 	%28 = xor i32 %24, %20		; <i32> [#uses=1]
 	%29 = xor i32 %28, %27		; <i32> [#uses=4]
 	%30 = lshr i32 %16, 24		; <i32> [#uses=1]
 	%31 = zext i32 %30 to i64		; <i64> [#uses=1]
-	%32 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %31		; <i32*> [#uses=1]
-	%33 = load i32, i32* %32, align 4		; <i32> [#uses=2]
+	%32 = getelementptr [256 x i32], ptr @Te0, i64 0, i64 %31		; <i32*> [#uses=1]
+	%33 = load i32, ptr %32, align 4		; <i32> [#uses=2]
 	%exitcond = icmp eq i64 %indvar, %tmp.16		; <i1> [#uses=1]
 	br i1 %exitcond, label %bb2, label %bb1
 
 bb1:		; preds = %bb
 	%ctg2.sum31 = add i64 %tmp18, 16		; <i64> [#uses=1]
-	%34 = getelementptr i8, i8* %rk26, i64 %ctg2.sum31		; <i8*> [#uses=1]
-	%35 = bitcast i8* %34 to i32*		; <i32*> [#uses=1]
+	%34 = getelementptr i8, ptr %rk26, i64 %ctg2.sum31		; <i8*> [#uses=1]
+	%35 = bitcast ptr %34 to ptr		; <i32*> [#uses=1]
 	%36 = lshr i32 %29, 16		; <i32> [#uses=1]
 	%37 = and i32 %36, 255		; <i32> [#uses=1]
 	%38 = zext i32 %37 to i64		; <i64> [#uses=1]
-	%39 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %38		; <i32*> [#uses=1]
-	%40 = load i32, i32* %39, align 4		; <i32> [#uses=1]
-	%41 = load i32, i32* %35, align 4		; <i32> [#uses=1]
+	%39 = getelementptr [256 x i32], ptr @Te1, i64 0, i64 %38		; <i32*> [#uses=1]
+	%40 = load i32, ptr %39, align 4		; <i32> [#uses=1]
+	%41 = load i32, ptr %35, align 4		; <i32> [#uses=1]
 	%42 = xor i32 %40, %33		; <i32> [#uses=1]
 	%43 = xor i32 %42, %41		; <i32> [#uses=1]
 	%44 = lshr i32 %29, 24		; <i32> [#uses=1]
 	%45 = zext i32 %44 to i64		; <i64> [#uses=1]
-	%46 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %45		; <i32*> [#uses=1]
-	%47 = load i32, i32* %46, align 4		; <i32> [#uses=1]
+	%46 = getelementptr [256 x i32], ptr @Te0, i64 0, i64 %45		; <i32*> [#uses=1]
+	%47 = load i32, ptr %46, align 4		; <i32> [#uses=1]
 	%48 = and i32 %16, 255		; <i32> [#uses=1]
 	%49 = zext i32 %48 to i64		; <i64> [#uses=1]
-	%50 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %49		; <i32*> [#uses=1]
-	%51 = load i32, i32* %50, align 4		; <i32> [#uses=1]
+	%50 = getelementptr [256 x i32], ptr @Te3, i64 0, i64 %49		; <i32*> [#uses=1]
+	%51 = load i32, ptr %50, align 4		; <i32> [#uses=1]
 	%ctg2.sum32 = add i64 %tmp18, 20		; <i64> [#uses=1]
-	%52 = getelementptr i8, i8* %rk26, i64 %ctg2.sum32		; <i8*> [#uses=1]
-	%53 = bitcast i8* %52 to i32*		; <i32*> [#uses=1]
-	%54 = load i32, i32* %53, align 4		; <i32> [#uses=1]
+	%52 = getelementptr i8, ptr %rk26, i64 %ctg2.sum32		; <i8*> [#uses=1]
+	%53 = bitcast ptr %52 to ptr		; <i32*> [#uses=1]
+	%54 = load i32, ptr %53, align 4		; <i32> [#uses=1]
 	%55 = xor i32 %51, %47		; <i32> [#uses=1]
 	%56 = xor i32 %55, %54		; <i32> [#uses=1]
 	%indvar.next = add i64 %indvar, 1		; <i64> [#uses=1]
@@ -254,49 +254,49 @@ bb1:		; preds = %bb
 bb2:		; preds = %bb
 	%tmp10 = shl i64 %tmp.16, 4		; <i64> [#uses=2]
 	%ctg2.sum = add i64 %tmp10, 16		; <i64> [#uses=1]
-	%tmp1213 = getelementptr i8, i8* %rk26, i64 %ctg2.sum		; <i8*> [#uses=1]
-	%57 = bitcast i8* %tmp1213 to i32*		; <i32*> [#uses=1]
+	%tmp1213 = getelementptr i8, ptr %rk26, i64 %ctg2.sum		; <i8*> [#uses=1]
+	%57 = bitcast ptr %tmp1213 to ptr		; <i32*> [#uses=1]
 	%58 = and i32 %33, -16777216		; <i32> [#uses=1]
 	%59 = lshr i32 %29, 16		; <i32> [#uses=1]
 	%60 = and i32 %59, 255		; <i32> [#uses=1]
 	%61 = zext i32 %60 to i64		; <i64> [#uses=1]
-	%62 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %61		; <i32*> [#uses=1]
-	%63 = load i32, i32* %62, align 4		; <i32> [#uses=1]
+	%62 = getelementptr [256 x i32], ptr @Te1, i64 0, i64 %61		; <i32*> [#uses=1]
+	%63 = load i32, ptr %62, align 4		; <i32> [#uses=1]
 	%64 = and i32 %63, 16711680		; <i32> [#uses=1]
 	%65 = or i32 %64, %58		; <i32> [#uses=1]
-	%66 = load i32, i32* %57, align 4		; <i32> [#uses=1]
+	%66 = load i32, ptr %57, align 4		; <i32> [#uses=1]
 	%67 = xor i32 %65, %66		; <i32> [#uses=2]
 	%68 = lshr i32 %29, 8		; <i32> [#uses=1]
 	%69 = zext i32 %68 to i64		; <i64> [#uses=1]
-	%70 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %69		; <i32*> [#uses=1]
-	%71 = load i32, i32* %70, align 4		; <i32> [#uses=1]
+	%70 = getelementptr [256 x i32], ptr @Te0, i64 0, i64 %69		; <i32*> [#uses=1]
+	%71 = load i32, ptr %70, align 4		; <i32> [#uses=1]
 	%72 = and i32 %71, -16777216		; <i32> [#uses=1]
 	%73 = and i32 %16, 255		; <i32> [#uses=1]
 	%74 = zext i32 %73 to i64		; <i64> [#uses=1]
-	%75 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %74		; <i32*> [#uses=1]
-	%76 = load i32, i32* %75, align 4		; <i32> [#uses=1]
+	%75 = getelementptr [256 x i32], ptr @Te1, i64 0, i64 %74		; <i32*> [#uses=1]
+	%76 = load i32, ptr %75, align 4		; <i32> [#uses=1]
 	%77 = and i32 %76, 16711680		; <i32> [#uses=1]
 	%78 = or i32 %77, %72		; <i32> [#uses=1]
 	%ctg2.sum25 = add i64 %tmp10, 20		; <i64> [#uses=1]
-	%79 = getelementptr i8, i8* %rk26, i64 %ctg2.sum25		; <i8*> [#uses=1]
-	%80 = bitcast i8* %79 to i32*		; <i32*> [#uses=1]
-	%81 = load i32, i32* %80, align 4		; <i32> [#uses=1]
+	%79 = getelementptr i8, ptr %rk26, i64 %ctg2.sum25		; <i8*> [#uses=1]
+	%80 = bitcast ptr %79 to ptr		; <i32*> [#uses=1]
+	%81 = load i32, ptr %80, align 4		; <i32> [#uses=1]
 	%82 = xor i32 %78, %81		; <i32> [#uses=2]
 	%83 = lshr i32 %67, 24		; <i32> [#uses=1]
 	%84 = trunc i32 %83 to i8		; <i8> [#uses=1]
-	store i8 %84, i8* %out, align 1
+	store i8 %84, ptr %out, align 1
 	%85 = lshr i32 %67, 16		; <i32> [#uses=1]
 	%86 = trunc i32 %85 to i8		; <i8> [#uses=1]
-	%87 = getelementptr i8, i8* %out, i64 1		; <i8*> [#uses=1]
-	store i8 %86, i8* %87, align 1
-	%88 = getelementptr i8, i8* %out, i64 4		; <i8*> [#uses=1]
+	%87 = getelementptr i8, ptr %out, i64 1		; <i8*> [#uses=1]
+	store i8 %86, ptr %87, align 1
+	%88 = getelementptr i8, ptr %out, i64 4		; <i8*> [#uses=1]
 	%89 = lshr i32 %82, 24		; <i32> [#uses=1]
 	%90 = trunc i32 %89 to i8		; <i8> [#uses=1]
-	store i8 %90, i8* %88, align 1
+	store i8 %90, ptr %88, align 1
 	%91 = lshr i32 %82, 16		; <i32> [#uses=1]
 	%92 = trunc i32 %91 to i8		; <i8> [#uses=1]
-	%93 = getelementptr i8, i8* %out, i64 5		; <i8*> [#uses=1]
-	store i8 %92, i8* %93, align 1
+	%93 = getelementptr i8, ptr %out, i64 5		; <i8*> [#uses=1]
+	store i8 %92, ptr %93, align 1
 	ret void
 }
 
@@ -304,7 +304,7 @@ bb2:		; preds = %bb
 ; is equal to the stride.
 ; It must not fold (cmp (add iv, 1), 1) --> (cmp iv, 0).
 
-define i32 @f(i32 %i, i32* nocapture %a) nounwind uwtable readonly ssp {
+define i32 @f(i32 %i, ptr nocapture %a) nounwind uwtable readonly ssp {
 ; GENERIC-LABEL: f:
 ; GENERIC:       ## %bb.0: ## %entry
 ; GENERIC-NEXT:    xorl %eax, %eax
@@ -366,8 +366,8 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
   %indvars.iv = phi i64 [ %0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
   %bi.06 = phi i32 [ 0, %for.body.lr.ph ], [ %i.addr.0.bi.0, %for.body ]
   %b.05 = phi i32 [ 0, %for.body.lr.ph ], [ %.b.0, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
-  %1 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
+  %1 = load i32, ptr %arrayidx, align 4
   %cmp1 = icmp ugt i32 %1, %b.05
   %.b.0 = select i1 %cmp1, i32 %1, i32 %b.05
   %2 = trunc i64 %indvars.iv to i32

diff  --git a/llvm/test/CodeGen/X86/madd.ll b/llvm/test/CodeGen/X86/madd.ll
index 4d0045fec2649..df4155845f37b 100644
--- a/llvm/test/CodeGen/X86/madd.ll
+++ b/llvm/test/CodeGen/X86/madd.ll
@@ -5,7 +5,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX256,AVX512,AVX512F
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX,AVX256,AVX512,AVX512BW
 
-define i32 @_Z10test_shortPsS_i_128(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
+define i32 @_Z10test_shortPsS_i_128(ptr nocapture readonly, ptr nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: _Z10test_shortPsS_i_128:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
@@ -61,13 +61,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <4 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i16, i16* %0, i64 %index
-  %5 = bitcast i16* %4 to <4 x i16>*
-  %wide.load = load <4 x i16>, <4 x i16>* %5, align 2
+  %4 = getelementptr inbounds i16, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <4 x i16>, ptr %5, align 2
   %6 = sext <4 x i16> %wide.load to <4 x i32>
-  %7 = getelementptr inbounds i16, i16* %1, i64 %index
-  %8 = bitcast i16* %7 to <4 x i16>*
-  %wide.load14 = load <4 x i16>, <4 x i16>* %8, align 2
+  %7 = getelementptr inbounds i16, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <4 x i16>, ptr %8, align 2
   %9 = sext <4 x i16> %wide.load14 to <4 x i32>
   %10 = mul nsw <4 x i32> %9, %6
   %11 = add nsw <4 x i32> %10, %vec.phi
@@ -84,7 +84,7 @@ middle.block:
   ret i32 %13
 }
 
-define i32 @_Z10test_shortPsS_i_256(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
+define i32 @_Z10test_shortPsS_i_256(ptr nocapture readonly, ptr nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: _Z10test_shortPsS_i_256:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
@@ -167,13 +167,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <8 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i16, i16* %0, i64 %index
-  %5 = bitcast i16* %4 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %5, align 2
+  %4 = getelementptr inbounds i16, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <8 x i16>, ptr %5, align 2
   %6 = sext <8 x i16> %wide.load to <8 x i32>
-  %7 = getelementptr inbounds i16, i16* %1, i64 %index
-  %8 = bitcast i16* %7 to <8 x i16>*
-  %wide.load14 = load <8 x i16>, <8 x i16>* %8, align 2
+  %7 = getelementptr inbounds i16, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <8 x i16>, ptr %8, align 2
   %9 = sext <8 x i16> %wide.load14 to <8 x i32>
   %10 = mul nsw <8 x i32> %9, %6
   %11 = add nsw <8 x i32> %10, %vec.phi
@@ -192,7 +192,7 @@ middle.block:
   ret i32 %13
 }
 
-define i32 @_Z10test_shortPsS_i_512(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
+define i32 @_Z10test_shortPsS_i_512(ptr nocapture readonly, ptr nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: _Z10test_shortPsS_i_512:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
@@ -319,13 +319,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <16 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i16, i16* %0, i64 %index
-  %5 = bitcast i16* %4 to <16 x i16>*
-  %wide.load = load <16 x i16>, <16 x i16>* %5, align 2
+  %4 = getelementptr inbounds i16, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <16 x i16>, ptr %5, align 2
   %6 = sext <16 x i16> %wide.load to <16 x i32>
-  %7 = getelementptr inbounds i16, i16* %1, i64 %index
-  %8 = bitcast i16* %7 to <16 x i16>*
-  %wide.load14 = load <16 x i16>, <16 x i16>* %8, align 2
+  %7 = getelementptr inbounds i16, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <16 x i16>, ptr %8, align 2
   %9 = sext <16 x i16> %wide.load14 to <16 x i32>
   %10 = mul nsw <16 x i32> %9, %6
   %11 = add nsw <16 x i32> %10, %vec.phi
@@ -346,7 +346,7 @@ middle.block:
   ret i32 %13
 }
 
-define i32 @_Z10test_shortPsS_i_1024(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
+define i32 @_Z10test_shortPsS_i_1024(ptr nocapture readonly, ptr nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: _Z10test_shortPsS_i_1024:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
@@ -541,13 +541,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <32 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i16, i16* %0, i64 %index
-  %5 = bitcast i16* %4 to <32 x i16>*
-  %wide.load = load <32 x i16>, <32 x i16>* %5, align 2
+  %4 = getelementptr inbounds i16, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <32 x i16>, ptr %5, align 2
   %6 = sext <32 x i16> %wide.load to <32 x i32>
-  %7 = getelementptr inbounds i16, i16* %1, i64 %index
-  %8 = bitcast i16* %7 to <32 x i16>*
-  %wide.load14 = load <32 x i16>, <32 x i16>* %8, align 2
+  %7 = getelementptr inbounds i16, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <32 x i16>, ptr %8, align 2
   %9 = sext <32 x i16> %wide.load14 to <32 x i32>
   %10 = mul nsw <32 x i32> %9, %6
   %11 = add nsw <32 x i32> %10, %vec.phi
@@ -570,7 +570,7 @@ middle.block:
   ret i32 %13
 }
 
-define i32 @_Z9test_charPcS_i_128(i8* nocapture readonly, i8* nocapture readonly, i32) local_unnamed_addr #0 {
+define i32 @_Z9test_charPcS_i_128(ptr nocapture readonly, ptr nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: _Z9test_charPcS_i_128:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
@@ -629,13 +629,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <4 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i8, i8* %0, i64 %index
-  %5 = bitcast i8* %4 to <4 x i8>*
-  %wide.load = load <4 x i8>, <4 x i8>* %5, align 1
+  %4 = getelementptr inbounds i8, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <4 x i8>, ptr %5, align 1
   %6 = sext <4 x i8> %wide.load to <4 x i32>
-  %7 = getelementptr inbounds i8, i8* %1, i64 %index
-  %8 = bitcast i8* %7 to <4 x i8>*
-  %wide.load14 = load <4 x i8>, <4 x i8>* %8, align 1
+  %7 = getelementptr inbounds i8, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <4 x i8>, ptr %8, align 1
   %9 = sext <4 x i8> %wide.load14 to <4 x i32>
   %10 = mul nsw <4 x i32> %9, %6
   %11 = add nsw <4 x i32> %10, %vec.phi
@@ -652,7 +652,7 @@ middle.block:
   ret i32 %13
 }
 
-define i32 @_Z9test_charPcS_i_256(i8* nocapture readonly, i8* nocapture readonly, i32) local_unnamed_addr #0 {
+define i32 @_Z9test_charPcS_i_256(ptr nocapture readonly, ptr nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: _Z9test_charPcS_i_256:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
@@ -741,13 +741,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <8 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i8, i8* %0, i64 %index
-  %5 = bitcast i8* %4 to <8 x i8>*
-  %wide.load = load <8 x i8>, <8 x i8>* %5, align 1
+  %4 = getelementptr inbounds i8, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <8 x i8>, ptr %5, align 1
   %6 = sext <8 x i8> %wide.load to <8 x i32>
-  %7 = getelementptr inbounds i8, i8* %1, i64 %index
-  %8 = bitcast i8* %7 to <8 x i8>*
-  %wide.load14 = load <8 x i8>, <8 x i8>* %8, align 1
+  %7 = getelementptr inbounds i8, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <8 x i8>, ptr %8, align 1
   %9 = sext <8 x i8> %wide.load14 to <8 x i32>
   %10 = mul nsw <8 x i32> %9, %6
   %11 = add nsw <8 x i32> %10, %vec.phi
@@ -766,7 +766,7 @@ middle.block:
   ret i32 %13
 }
 
-define i32 @_Z9test_charPcS_i_512(i8* nocapture readonly, i8* nocapture readonly, i32) local_unnamed_addr #0 {
+define i32 @_Z9test_charPcS_i_512(ptr nocapture readonly, ptr nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: _Z9test_charPcS_i_512:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
@@ -903,13 +903,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <16 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i8, i8* %0, i64 %index
-  %5 = bitcast i8* %4 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %5, align 1
+  %4 = getelementptr inbounds i8, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <16 x i8>, ptr %5, align 1
   %6 = sext <16 x i8> %wide.load to <16 x i32>
-  %7 = getelementptr inbounds i8, i8* %1, i64 %index
-  %8 = bitcast i8* %7 to <16 x i8>*
-  %wide.load14 = load <16 x i8>, <16 x i8>* %8, align 1
+  %7 = getelementptr inbounds i8, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <16 x i8>, ptr %8, align 1
   %9 = sext <16 x i8> %wide.load14 to <16 x i32>
   %10 = mul nsw <16 x i32> %9, %6
   %11 = add nsw <16 x i32> %10, %vec.phi
@@ -930,7 +930,7 @@ middle.block:
   ret i32 %13
 }
 
-define i32 @_Z9test_charPcS_i_1024(i8* nocapture readonly, i8* nocapture readonly, i32) local_unnamed_addr #0 {
+define i32 @_Z9test_charPcS_i_1024(ptr nocapture readonly, ptr nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: _Z9test_charPcS_i_1024:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
@@ -1146,13 +1146,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <32 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i8, i8* %0, i64 %index
-  %5 = bitcast i8* %4 to <32 x i8>*
-  %wide.load = load <32 x i8>, <32 x i8>* %5, align 1
+  %4 = getelementptr inbounds i8, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <32 x i8>, ptr %5, align 1
   %6 = sext <32 x i8> %wide.load to <32 x i32>
-  %7 = getelementptr inbounds i8, i8* %1, i64 %index
-  %8 = bitcast i8* %7 to <32 x i8>*
-  %wide.load14 = load <32 x i8>, <32 x i8>* %8, align 1
+  %7 = getelementptr inbounds i8, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <32 x i8>, ptr %8, align 1
   %9 = sext <32 x i8> %wide.load14 to <32 x i32>
   %10 = mul nsw <32 x i32> %9, %6
   %11 = add nsw <32 x i32> %10, %vec.phi
@@ -1175,7 +1175,7 @@ middle.block:
   ret i32 %13
 }
 
-define i32 @test_unsigned_short_128(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
+define i32 @test_unsigned_short_128(ptr nocapture readonly, ptr nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: test_unsigned_short_128:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
@@ -1231,13 +1231,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <4 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i16, i16* %0, i64 %index
-  %5 = bitcast i16* %4 to <4 x i16>*
-  %wide.load = load <4 x i16>, <4 x i16>* %5, align 2
+  %4 = getelementptr inbounds i16, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <4 x i16>, ptr %5, align 2
   %6 = zext <4 x i16> %wide.load to <4 x i32>
-  %7 = getelementptr inbounds i16, i16* %1, i64 %index
-  %8 = bitcast i16* %7 to <4 x i16>*
-  %wide.load14 = load <4 x i16>, <4 x i16>* %8, align 2
+  %7 = getelementptr inbounds i16, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <4 x i16>, ptr %8, align 2
   %9 = zext <4 x i16> %wide.load14 to <4 x i32>
   %10 = mul nsw <4 x i32> %9, %6
   %11 = add nsw <4 x i32> %10, %vec.phi
@@ -1254,7 +1254,7 @@ middle.block:
   ret i32 %13
 }
 
-define i32 @test_unsigned_short_256(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
+define i32 @test_unsigned_short_256(ptr nocapture readonly, ptr nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: test_unsigned_short_256:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
@@ -1350,13 +1350,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <8 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i16, i16* %0, i64 %index
-  %5 = bitcast i16* %4 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %5, align 2
+  %4 = getelementptr inbounds i16, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <8 x i16>, ptr %5, align 2
   %6 = zext <8 x i16> %wide.load to <8 x i32>
-  %7 = getelementptr inbounds i16, i16* %1, i64 %index
-  %8 = bitcast i16* %7 to <8 x i16>*
-  %wide.load14 = load <8 x i16>, <8 x i16>* %8, align 2
+  %7 = getelementptr inbounds i16, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <8 x i16>, ptr %8, align 2
   %9 = zext <8 x i16> %wide.load14 to <8 x i32>
   %10 = mul nsw <8 x i32> %9, %6
   %11 = add nsw <8 x i32> %10, %vec.phi
@@ -1375,7 +1375,7 @@ middle.block:
   ret i32 %13
 }
 
-define i32 @test_unsigned_short_512(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
+define i32 @test_unsigned_short_512(ptr nocapture readonly, ptr nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: test_unsigned_short_512:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
@@ -1533,13 +1533,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <16 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i16, i16* %0, i64 %index
-  %5 = bitcast i16* %4 to <16 x i16>*
-  %wide.load = load <16 x i16>, <16 x i16>* %5, align 2
+  %4 = getelementptr inbounds i16, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <16 x i16>, ptr %5, align 2
   %6 = zext <16 x i16> %wide.load to <16 x i32>
-  %7 = getelementptr inbounds i16, i16* %1, i64 %index
-  %8 = bitcast i16* %7 to <16 x i16>*
-  %wide.load14 = load <16 x i16>, <16 x i16>* %8, align 2
+  %7 = getelementptr inbounds i16, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <16 x i16>, ptr %8, align 2
   %9 = zext <16 x i16> %wide.load14 to <16 x i32>
   %10 = mul nsw <16 x i32> %9, %6
   %11 = add nsw <16 x i32> %10, %vec.phi
@@ -1560,7 +1560,7 @@ middle.block:
   ret i32 %13
 }
 
-define i32 @test_unsigned_short_1024(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
+define i32 @test_unsigned_short_1024(ptr nocapture readonly, ptr nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: test_unsigned_short_1024:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
@@ -1792,13 +1792,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <32 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i16, i16* %0, i64 %index
-  %5 = bitcast i16* %4 to <32 x i16>*
-  %wide.load = load <32 x i16>, <32 x i16>* %5, align 2
+  %4 = getelementptr inbounds i16, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <32 x i16>, ptr %5, align 2
   %6 = zext <32 x i16> %wide.load to <32 x i32>
-  %7 = getelementptr inbounds i16, i16* %1, i64 %index
-  %8 = bitcast i16* %7 to <32 x i16>*
-  %wide.load14 = load <32 x i16>, <32 x i16>* %8, align 2
+  %7 = getelementptr inbounds i16, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <32 x i16>, ptr %8, align 2
   %9 = zext <32 x i16> %wide.load14 to <32 x i32>
   %10 = mul nsw <32 x i32> %9, %6
   %11 = add nsw <32 x i32> %10, %vec.phi
@@ -2275,7 +2275,7 @@ define <32 x i32> @jumbled_indices32(<64 x i16> %A, <64 x i16> %B) {
 
 ; NOTE: We're testing with loads because ABI lowering creates a concat_vectors that extract_vector_elt creation can see through.
 ; This would require the combine to recreate the concat_vectors.
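
; Hedged reference (illustrative, not from this patch): per output element,
; pmaddwd computes the sum of two adjacent sign-extended i16 products:
define i32 @pmaddwd_one_lane(i16 %a0, i16 %a1, i16 %b0, i16 %b1) {
  %sa0 = sext i16 %a0 to i32
  %sa1 = sext i16 %a1 to i32
  %sb0 = sext i16 %b0 to i32
  %sb1 = sext i16 %b1 to i32
  %m0 = mul nsw i32 %sa0, %sb0    ; even-index product
  %m1 = mul nsw i32 %sa1, %sb1    ; odd-index product
  %r = add i32 %m0, %m1           ; one 32-bit result lane
  ret i32 %r
}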
-define <4 x i32> @pmaddwd_128(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
+define <4 x i32> @pmaddwd_128(ptr %Aptr, ptr %Bptr) {
 ; SSE2-LABEL: pmaddwd_128:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
@@ -2287,8 +2287,8 @@ define <4 x i32> @pmaddwd_128(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpmaddwd (%rsi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
-  %A = load <8 x i16>, <8 x i16>* %Aptr
-  %B = load <8 x i16>, <8 x i16>* %Bptr
+  %A = load <8 x i16>, ptr %Aptr
+  %B = load <8 x i16>, ptr %Bptr
   %A_even = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %A_odd = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %B_even = shufflevector <8 x i16> %B, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -2303,7 +2303,7 @@ define <4 x i32> @pmaddwd_128(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
   ret <4 x i32> %add
 }
 
-define <8 x i32> @pmaddwd_256(<16 x i16>* %Aptr, <16 x i16>* %Bptr) {
+define <8 x i32> @pmaddwd_256(ptr %Aptr, ptr %Bptr) {
 ; SSE2-LABEL: pmaddwd_256:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
@@ -2326,8 +2326,8 @@ define <8 x i32> @pmaddwd_256(<16 x i16>* %Aptr, <16 x i16>* %Bptr) {
 ; AVX256-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX256-NEXT:    vpmaddwd (%rsi), %ymm0, %ymm0
 ; AVX256-NEXT:    retq
-  %A = load <16 x i16>, <16 x i16>* %Aptr
-  %B = load <16 x i16>, <16 x i16>* %Bptr
+  %A = load <16 x i16>, ptr %Aptr
+  %B = load <16 x i16>, ptr %Bptr
   %A_even = shufflevector <16 x i16> %A, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %A_odd = shufflevector <16 x i16> %A, <16 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   %B_even = shufflevector <16 x i16> %B, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -2342,7 +2342,7 @@ define <8 x i32> @pmaddwd_256(<16 x i16>* %Aptr, <16 x i16>* %Bptr) {
   ret <8 x i32> %add
 }
 
-define <16 x i32> @pmaddwd_512(<32 x i16>* %Aptr, <32 x i16>* %Bptr) {
+define <16 x i32> @pmaddwd_512(ptr %Aptr, ptr %Bptr) {
 ; SSE2-LABEL: pmaddwd_512:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
@@ -2391,8 +2391,8 @@ define <16 x i32> @pmaddwd_512(<32 x i16>* %Aptr, <32 x i16>* %Bptr) {
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vpmaddwd (%rsi), %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
-  %A = load <32 x i16>, <32 x i16>* %Aptr
-  %B = load <32 x i16>, <32 x i16>* %Bptr
+  %A = load <32 x i16>, ptr %Aptr
+  %B = load <32 x i16>, ptr %Bptr
   %A_even = shufflevector <32 x i16> %A, <32 x i16> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
   %A_odd = shufflevector <32 x i16> %A, <32 x i16> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %B_even = shufflevector <32 x i16> %B, <32 x i16> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -2407,7 +2407,7 @@ define <16 x i32> @pmaddwd_512(<32 x i16>* %Aptr, <32 x i16>* %Bptr) {
   ret <16 x i32> %add
 }
 
-define <32 x i32> @pmaddwd_1024(<64 x i16>* %Aptr, <64 x i16>* %Bptr) {
+define <32 x i32> @pmaddwd_1024(ptr %Aptr, ptr %Bptr) {
 ; SSE2-LABEL: pmaddwd_1024:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq %rdi, %rax
@@ -2494,8 +2494,8 @@ define <32 x i32> @pmaddwd_1024(<64 x i16>* %Aptr, <64 x i16>* %Bptr) {
 ; AVX512BW-NEXT:    vpmaddwd (%rsi), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpmaddwd 64(%rsi), %zmm1, %zmm1
 ; AVX512BW-NEXT:    retq
-  %A = load <64 x i16>, <64 x i16>* %Aptr
-  %B = load <64 x i16>, <64 x i16>* %Bptr
+  %A = load <64 x i16>, ptr %Aptr
+  %B = load <64 x i16>, ptr %Bptr
   %A_even = shufflevector <64 x i16> %A, <64 x i16> undef, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
   %A_odd = shufflevector <64 x i16> %A, <64 x i16> undef, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
   %B_even = shufflevector <64 x i16> %B, <64 x i16> undef, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
@@ -2510,7 +2510,7 @@ define <32 x i32> @pmaddwd_1024(<64 x i16>* %Aptr, <64 x i16>* %Bptr) {
   ret <32 x i32> %add
 }
 
-define <4 x i32> @pmaddwd_commuted_mul(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
+define <4 x i32> @pmaddwd_commuted_mul(ptr %Aptr, ptr %Bptr) {
 ; SSE2-LABEL: pmaddwd_commuted_mul:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
@@ -2522,8 +2522,8 @@ define <4 x i32> @pmaddwd_commuted_mul(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpmaddwd (%rsi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
-  %A = load <8 x i16>, <8 x i16>* %Aptr
-  %B = load <8 x i16>, <8 x i16>* %Bptr
+  %A = load <8 x i16>, ptr %Aptr
+  %B = load <8 x i16>, ptr %Bptr
   %A_even = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %A_odd = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %B_even = shufflevector <8 x i16> %B, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -2538,7 +2538,7 @@ define <4 x i32> @pmaddwd_commuted_mul(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
   ret <4 x i32> %add
 }
 
-define <4 x i32> @pmaddwd_swapped_indices(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
+define <4 x i32> @pmaddwd_swapped_indices(ptr %Aptr, ptr %Bptr) {
 ; SSE2-LABEL: pmaddwd_swapped_indices:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa (%rdi), %xmm0
@@ -2550,8 +2550,8 @@ define <4 x i32> @pmaddwd_swapped_indices(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
 ; AVX-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX-NEXT:    vpmaddwd (%rsi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
-  %A = load <8 x i16>, <8 x i16>* %Aptr
-  %B = load <8 x i16>, <8 x i16>* %Bptr
+  %A = load <8 x i16>, ptr %Aptr
+  %B = load <8 x i16>, ptr %Bptr
   %A_even = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 1, i32 2, i32 5, i32 6> ; indices aren't all even
   %A_odd = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 0, i32 3, i32 4, i32 7> ; indices aren't all odd
   %B_even = shufflevector <8 x i16> %B, <8 x i16> undef, <4 x i32> <i32 1, i32 2, i32 5, i32 6> ; same indices as A
@@ -2567,7 +2567,7 @@ define <4 x i32> @pmaddwd_swapped_indices(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
 }
 
 ; Negative test where indices aren't paired properly
-define <4 x i32> @pmaddwd_bad_indices(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
+define <4 x i32> @pmaddwd_bad_indices(ptr %Aptr, ptr %Bptr) {
 ; SSE2-LABEL: pmaddwd_bad_indices:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = mem[1,0,2,3,4,5,6,7]
@@ -2581,8 +2581,8 @@ define <4 x i32> @pmaddwd_bad_indices(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
 ; AVX-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,6,7]
 ; AVX-NEXT:    vpmaddwd (%rsi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
-  %A = load <8 x i16>, <8 x i16>* %Aptr
-  %B = load <8 x i16>, <8 x i16>* %Bptr
+  %A = load <8 x i16>, ptr %Aptr
+  %B = load <8 x i16>, ptr %Bptr
   %A_even = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 1, i32 2, i32 5, i32 6>
   %A_odd = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 0, i32 3, i32 4, i32 7>
   %B_even = shufflevector <8 x i16> %B, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6> ; different indices than A
@@ -2599,7 +2599,7 @@ define <4 x i32> @pmaddwd_bad_indices(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
 
 ; This test contains two multiplies joined by an add. The result of that add is then reduced to a single element.
 ; SelectionDAGBuilder should tag the joining add as a vector reduction. We need to recognize that both sides can use pmaddwd
-define i32 @madd_double_reduction(<8 x i16>* %arg, <8 x i16>* %arg1, <8 x i16>* %arg2, <8 x i16>* %arg3) {
+define i32 @madd_double_reduction(ptr %arg, ptr %arg1, ptr %arg2, ptr %arg3) {
 ; SSE2-LABEL: madd_double_reduction:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqu (%rdi), %xmm0
@@ -2629,13 +2629,13 @@ define i32 @madd_double_reduction(<8 x i16>* %arg, <8 x i16>* %arg1, <8 x i16>*
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    retq
-  %tmp = load <8 x i16>, <8 x i16>* %arg, align 1
-  %tmp6 = load <8 x i16>, <8 x i16>* %arg1, align 1
+  %tmp = load <8 x i16>, ptr %arg, align 1
+  %tmp6 = load <8 x i16>, ptr %arg1, align 1
   %tmp7 = sext <8 x i16> %tmp to <8 x i32>
   %tmp17 = sext <8 x i16> %tmp6 to <8 x i32>
   %tmp19 = mul nsw <8 x i32> %tmp7, %tmp17
-  %tmp20 = load <8 x i16>, <8 x i16>* %arg2, align 1
-  %tmp21 = load <8 x i16>, <8 x i16>* %arg3, align 1
+  %tmp20 = load <8 x i16>, ptr %arg2, align 1
+  %tmp21 = load <8 x i16>, ptr %arg3, align 1
   %tmp22 = sext <8 x i16> %tmp20 to <8 x i32>
   %tmp23 = sext <8 x i16> %tmp21 to <8 x i32>
   %tmp25 = mul nsw <8 x i32> %tmp22, %tmp23
@@ -2650,7 +2650,7 @@ define i32 @madd_double_reduction(<8 x i16>* %arg, <8 x i16>* %arg1, <8 x i16>*
   ret i32 %tmp35
 }
 
-define i32 @madd_quad_reduction(<8 x i16>* %arg, <8 x i16>* %arg1, <8 x i16>* %arg2, <8 x i16>* %arg3, <8 x i16>* %arg4, <8 x i16>* %arg5, <8 x i16>* %arg6, <8 x i16>* %arg7) {
+define i32 @madd_quad_reduction(ptr %arg, ptr %arg1, ptr %arg2, ptr %arg3, ptr %arg4, ptr %arg5, ptr %arg6, ptr %arg7) {
 ; SSE2-LABEL: madd_quad_reduction:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq {{[0-9]+}}(%rsp), %rax
@@ -2698,27 +2698,27 @@ define i32 @madd_quad_reduction(<8 x i16>* %arg, <8 x i16>* %arg1, <8 x i16>* %a
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    retq
-  %tmp = load <8 x i16>, <8 x i16>* %arg, align 1
-  %tmp6 = load <8 x i16>, <8 x i16>* %arg1, align 1
+  %tmp = load <8 x i16>, ptr %arg, align 1
+  %tmp6 = load <8 x i16>, ptr %arg1, align 1
   %tmp7 = sext <8 x i16> %tmp to <8 x i32>
   %tmp17 = sext <8 x i16> %tmp6 to <8 x i32>
   %tmp19 = mul nsw <8 x i32> %tmp7, %tmp17
-  %tmp20 = load <8 x i16>, <8 x i16>* %arg2, align 1
-  %tmp21 = load <8 x i16>, <8 x i16>* %arg3, align 1
+  %tmp20 = load <8 x i16>, ptr %arg2, align 1
+  %tmp21 = load <8 x i16>, ptr %arg3, align 1
   %tmp22 = sext <8 x i16> %tmp20 to <8 x i32>
   %tmp23 = sext <8 x i16> %tmp21 to <8 x i32>
   %tmp25 = mul nsw <8 x i32> %tmp22, %tmp23
   %tmp26 = add nuw nsw <8 x i32> %tmp25, %tmp19
 
-  %tmp40 = load <8 x i16>, <8 x i16>* %arg4, align 1
-  %tmp41 = load <8 x i16>, <8 x i16>* %arg5, align 1
+  %tmp40 = load <8 x i16>, ptr %arg4, align 1
+  %tmp41 = load <8 x i16>, ptr %arg5, align 1
   %tmp42 = sext <8 x i16> %tmp40 to <8 x i32>
   %tmp43 = sext <8 x i16> %tmp41 to <8 x i32>
   %tmp45 = mul nsw <8 x i32> %tmp42, %tmp43
   %tmp56 = add nuw nsw <8 x i32> %tmp26, %tmp45
 
-  %tmp50 = load <8 x i16>, <8 x i16>* %arg6, align 1
-  %tmp51 = load <8 x i16>, <8 x i16>* %arg7, align 1
+  %tmp50 = load <8 x i16>, ptr %arg6, align 1
+  %tmp51 = load <8 x i16>, ptr %arg7, align 1
   %tmp52 = sext <8 x i16> %tmp50 to <8 x i32>
   %tmp53 = sext <8 x i16> %tmp51 to <8 x i32>
   %tmp55 = mul nsw <8 x i32> %tmp52, %tmp53
@@ -2734,7 +2734,7 @@ define i32 @madd_quad_reduction(<8 x i16>* %arg, <8 x i16>* %arg1, <8 x i16>* %a
   ret i32 %tmp35
 }
 
-define i64 @sum_and_sum_of_squares(i8* %a, i32 %n) {
+define i64 @sum_and_sum_of_squares(ptr %a, i32 %n) {
 ; SSE2-LABEL: sum_and_sum_of_squares:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %esi, %eax
@@ -2860,9 +2860,9 @@ vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <8 x i32> [ %6, %vector.body ], [ zeroinitializer, %entry ]
   %sum.phi = phi <8 x i32> [ %4, %vector.body ], [ zeroinitializer, %entry ]
-  %1 = getelementptr inbounds i8, i8* %a, i64 %index
-  %2 = bitcast i8* %1 to <8 x i8>*
-  %wide.load = load <8 x i8>, <8 x i8>* %2, align 1
+  %1 = getelementptr inbounds i8, ptr %a, i64 %index
+  %2 = bitcast ptr %1 to ptr
+  %wide.load = load <8 x i8>, ptr %2, align 1
   %3 = zext <8 x i8> %wide.load to <8 x i32>
   %4 = add nsw <8 x i32> %3, %sum.phi
   %5 = mul nsw <8 x i32> %3, %3
@@ -2893,7 +2893,7 @@ middle.block:
   ret i64 %tmp30
 }
 
-define i32 @sum_of_square_differences(i8* %a, i8* %b, i32 %n) {
+define i32 @sum_of_square_differences(ptr %a, ptr %b, i32 %n) {
 ; SSE2-LABEL: sum_of_square_differences:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    movl %edx, %eax
@@ -2983,13 +2983,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <8 x i32> [ %9, %vector.body ], [ zeroinitializer, %entry ]
-  %1 = getelementptr inbounds i8, i8* %a, i64 %index
-  %2 = bitcast i8* %1 to <8 x i8>*
-  %wide.load = load <8 x i8>, <8 x i8>* %2, align 1
+  %1 = getelementptr inbounds i8, ptr %a, i64 %index
+  %2 = bitcast ptr %1 to ptr
+  %wide.load = load <8 x i8>, ptr %2, align 1
   %3 = zext <8 x i8> %wide.load to <8 x i32>
-  %4 = getelementptr inbounds i8, i8* %b, i64 %index
-  %5 = bitcast i8* %4 to <8 x i8>*
-  %wide.load2 = load <8 x i8>, <8 x i8>* %5, align 1
+  %4 = getelementptr inbounds i8, ptr %b, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load2 = load <8 x i8>, ptr %5, align 1
   %6 = zext <8 x i8> %wide.load2 to <8 x i32>
   %7 = sub <8 x i32> %6, %3
   %8 = mul <8 x i32> %7, %7
@@ -3011,7 +3011,7 @@ middle.block:
 
 ; PR49716 - https://llvm.org/PR49716
 
-define <4 x i32> @input_size_mismatch(<16 x i16> %x, <16 x i16>* %p) {
+define <4 x i32> @input_size_mismatch(<16 x i16> %x, ptr %p) {
 ; SSE2-LABEL: input_size_mismatch:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pmaddwd (%rdi), %xmm0
@@ -3022,7 +3022,7 @@ define <4 x i32> @input_size_mismatch(<16 x i16> %x, <16 x i16>* %p) {
 ; AVX-NEXT:    vpmaddwd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
-  %y = load <16 x i16>, <16 x i16>* %p, align 32
+  %y = load <16 x i16>, ptr %p, align 32
   %x0 = shufflevector <16 x i16> %x, <16 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %x1 = shufflevector <16 x i16> %x, <16 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %y0 = shufflevector <16 x i16> %y, <16 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -3096,7 +3096,7 @@ define <4 x i32> @output_size_mismatch_high_subvector(<16 x i16> %x, <16 x i16>
   ret <4 x i32> %r
 }
 
-define i32 @add_used_by_loop_phi(i8* %a, i8* %b, i64 %offset_a, i64 %offset_b, i64 %k) {
+define i32 @add_used_by_loop_phi(ptr %a, ptr %b, i64 %offset_a, i64 %offset_b, i64 %k) {
 ; SSE2-LABEL: add_used_by_loop_phi:
 ; SSE2:       # %bb.0: # %entry
 ; SSE2-NEXT:    addq %rdx, %rdi
@@ -3231,19 +3231,19 @@ define i32 @add_used_by_loop_phi(i8* %a, i8* %b, i64 %offset_a, i64 %offset_b, i
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
 entry:
-  %scevgep_a = getelementptr i8, i8* %a, i64 %offset_a
-  %scevgep_b = getelementptr i8, i8* %b, i64 %offset_b
+  %scevgep_a = getelementptr i8, ptr %a, i64 %offset_a
+  %scevgep_b = getelementptr i8, ptr %b, i64 %offset_b
   br label %loop
 
 loop:
   %t0 = phi <16 x i32> [ %3, %loop ], [ zeroinitializer, %entry ]
   %ivloop = phi i64 [ %nextivloop, %loop ], [ 0, %entry ]
-  %scevgep_a1 = getelementptr i8, i8* %scevgep_a, i64 %ivloop
-  %scevgep_a2 = bitcast i8* %scevgep_a1 to <16 x i8>*
-  %gepload_a = load <16 x i8>, <16 x i8>* %scevgep_a2, align 1
-  %scevgep_b1 = getelementptr i8, i8* %scevgep_b, i64 %ivloop
-  %scevgep_b2 = bitcast i8* %scevgep_b1 to <16 x i8>*
-  %gepload_b = load <16 x i8>, <16 x i8>* %scevgep_b2, align 1
+  %scevgep_a1 = getelementptr i8, ptr %scevgep_a, i64 %ivloop
+  %scevgep_a2 = bitcast ptr %scevgep_a1 to ptr
+  %gepload_a = load <16 x i8>, ptr %scevgep_a2, align 1
+  %scevgep_b1 = getelementptr i8, ptr %scevgep_b, i64 %ivloop
+  %scevgep_b2 = bitcast ptr %scevgep_b1 to ptr
+  %gepload_b = load <16 x i8>, ptr %scevgep_b2, align 1
   %0 = sext <16 x i8> %gepload_a to <16 x i32>
   %1 = sext <16 x i8> %gepload_b to <16 x i32>
   %2 = mul nsw <16 x i32> %0, %1

diff  --git a/llvm/test/CodeGen/X86/memcpy-scoped-aa.ll b/llvm/test/CodeGen/X86/memcpy-scoped-aa.ll
index 99ffd63d69cf9..7765297dc673f 100644
--- a/llvm/test/CodeGen/X86/memcpy-scoped-aa.ll
+++ b/llvm/test/CodeGen/X86/memcpy-scoped-aa.ll
@@ -15,14 +15,14 @@
 ; MIR-NEXT: %3:gr64 = MOV64rm %0, 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, killed %3 :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, killed %2 :: (store (s64) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-define i32 @test_memcpy(i32* nocapture %p, i32* nocapture readonly %q) {
-  %p0 = bitcast i32* %p to i8*
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-  %p1 = bitcast i32* %add.ptr to i8*
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
-  %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
-  %q1 = getelementptr inbounds i32, i32* %q, i64 1
-  %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
+define i32 @test_memcpy(ptr nocapture %p, ptr nocapture readonly %q) {
+  %p0 = bitcast ptr %p to ptr
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+  %p1 = bitcast ptr %add.ptr to ptr
+  tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
+  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
+  %q1 = getelementptr inbounds i32, ptr %q, i64 1
+  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
   %add = add i32 %v0, %v1
   ret i32 %add
 }
@@ -32,14 +32,14 @@ define i32 @test_memcpy(i32* nocapture %p, i32* nocapture readonly %q) {
 ; MIR-NEXT: %3:gr64 = MOV64rm %0, 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, killed %3 :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, killed %2 :: (store (s64) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-define i32 @test_memcpy_inline(i32* nocapture %p, i32* nocapture readonly %q) {
-  %p0 = bitcast i32* %p to i8*
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-  %p1 = bitcast i32* %add.ptr to i8*
-  tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
-  %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
-  %q1 = getelementptr inbounds i32, i32* %q, i64 1
-  %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
+define i32 @test_memcpy_inline(ptr nocapture %p, ptr nocapture readonly %q) {
+  %p0 = bitcast ptr %p to ptr
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+  %p1 = bitcast ptr %add.ptr to ptr
+  tail call void @llvm.memcpy.inline.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
+  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
+  %q1 = getelementptr inbounds i32, ptr %q, i64 1
+  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
   %add = add i32 %v0, %v1
   ret i32 %add
 }
@@ -49,14 +49,14 @@ define i32 @test_memcpy_inline(i32* nocapture %p, i32* nocapture readonly %q) {
 ; MIR-NEXT: %3:gr64 = MOV64rm %0, 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, killed %2 :: (store (s64) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, killed %3 :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-define i32 @test_memmove(i32* nocapture %p, i32* nocapture readonly %q) {
-  %p0 = bitcast i32* %p to i8*
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-  %p1 = bitcast i32* %add.ptr to i8*
-  tail call void @llvm.memmove.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
-  %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
-  %q1 = getelementptr inbounds i32, i32* %q, i64 1
-  %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
+define i32 @test_memmove(ptr nocapture %p, ptr nocapture readonly %q) {
+  %p0 = bitcast ptr %p to ptr
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+  %p1 = bitcast ptr %add.ptr to ptr
+  tail call void @llvm.memmove.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
+  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
+  %q1 = getelementptr inbounds i32, ptr %q, i64 1
+  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
   %add = add i32 %v0, %v1
   ret i32 %add
 }
@@ -65,12 +65,12 @@ define i32 @test_memmove(i32* nocapture %p, i32* nocapture readonly %q) {
 ; MIR:      %2:gr64 = MOV64ri -6148914691236517206
 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, %2 :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, %2 :: (store (s64) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-define i32 @test_memset(i32* nocapture %p, i32* nocapture readonly %q) {
-  %p0 = bitcast i32* %p to i8*
-  tail call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8 170, i64 16, i1 false), !alias.scope !2, !noalias !4
-  %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
-  %q1 = getelementptr inbounds i32, i32* %q, i64 1
-  %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
+define i32 @test_memset(ptr nocapture %p, ptr nocapture readonly %q) {
+  %p0 = bitcast ptr %p to ptr
+  tail call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, i8 170, i64 16, i1 false), !alias.scope !2, !noalias !4
+  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
+  %q1 = getelementptr inbounds i32, ptr %q, i64 1
+  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
   %add = add i32 %v0, %v1
   ret i32 %add
 }
@@ -80,24 +80,24 @@ define i32 @test_memset(i32* nocapture %p, i32* nocapture readonly %q) {
 ; MIR-NEXT: %3:gr64 = MOV64rm %0, 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 1, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, killed %3 :: (store (s64) into %ir.p0 + 8, align 1, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, killed %2 :: (store (s64) into %ir.p0, align 1, !alias.scope ![[SET0]], !noalias ![[SET1]])
-define i32 @test_mempcpy(i32* nocapture %p, i32* nocapture readonly %q) {
-  %p0 = bitcast i32* %p to i8*
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-  %p1 = bitcast i32* %add.ptr to i8*
-  %call = tail call i8* @mempcpy(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16), !alias.scope !2, !noalias !4
-  %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
-  %q1 = getelementptr inbounds i32, i32* %q, i64 1
-  %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
+define i32 @test_mempcpy(ptr nocapture %p, ptr nocapture readonly %q) {
+  %p0 = bitcast ptr %p to ptr
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+  %p1 = bitcast ptr %add.ptr to ptr
+  %call = tail call ptr @mempcpy(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16), !alias.scope !2, !noalias !4
+  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
+  %q1 = getelementptr inbounds i32, ptr %q, i64 1
+  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
   %add = add i32 %v0, %v1
   ret i32 %add
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
-declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg)
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
 
-declare i8* @mempcpy(i8*, i8*, i64)
+declare ptr @mempcpy(ptr, ptr, i64)
 
 !0 = distinct !{!0, !"bax"}
 !1 = distinct !{!1, !0, !"bax: %p"}
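
; Hedged sketch of the scoped-AA metadata shape used above (hypothetical IDs,
; not the elided !2/!4 from this file): a domain, per-pointer scopes, and the
; scope lists that the accesses reference via !alias.scope and !noalias.
!10 = distinct !{!10, !"domain"}           ; alias domain
!11 = distinct !{!11, !10, !"domain: p"}   ; scope for the %p accesses
!12 = distinct !{!12, !10, !"domain: q"}   ; scope for the %q accesses
!13 = !{!11}   ; used as !alias.scope on the memcpy, !noalias on the loads
!14 = !{!12}   ; used as !alias.scope on the loads, !noalias on the memcpy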

diff  --git a/llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll b/llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll
index 76fedc3e25898..c1fdd71c04948 100644
--- a/llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll
+++ b/llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll
@@ -28,19 +28,19 @@
 ; DBGDAG: X86ISD::RET_GLUE t{{[0-9]+}},
 
 ; DBGDAG-LABEL: Instruction selection begins
-define void @merge_store_partial_overlap_load([4 x i8]* %tmp) {
-  %tmp8 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i8 0
-  %tmp10 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i8 1
-  %tmp12 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i8 2
-  %tmp14 = getelementptr [4 x i8], [4 x i8]* %tmp, i32 0, i8 3
-
-  %tmp9 = load i8, i8* %tmp8, align 1   ; base + 0
-  %tmp11 = load i8, i8* %tmp10, align 1 ; base + 1
-  %tmp13 = load i8, i8* %tmp12, align 1 ; base + 2
-
-  store i8 %tmp9, i8* %tmp10, align 1   ; base + 1
-  store i8 %tmp11, i8* %tmp12, align 1  ; base + 2
-  store i8 %tmp13, i8* %tmp14, align 1  ; base + 3
+define void @merge_store_partial_overlap_load(ptr %tmp) {
+  %tmp8 = getelementptr inbounds [4 x i8], ptr %tmp, i32 0, i8 0
+  %tmp10 = getelementptr inbounds [4 x i8], ptr %tmp, i32 0, i8 1
+  %tmp12 = getelementptr inbounds [4 x i8], ptr %tmp, i32 0, i8 2
+  %tmp14 = getelementptr [4 x i8], ptr %tmp, i32 0, i8 3
+
+  %tmp9 = load i8, ptr %tmp8, align 1   ; base + 0
+  %tmp11 = load i8, ptr %tmp10, align 1 ; base + 1
+  %tmp13 = load i8, ptr %tmp12, align 1 ; base + 2
+
+  store i8 %tmp9, ptr %tmp10, align 1   ; base + 1
+  store i8 %tmp11, ptr %tmp12, align 1  ; base + 2
+  store i8 %tmp13, ptr %tmp14, align 1  ; base + 3
 
 ; Should emit
 ; load base + 0, base + 1

diff  --git a/llvm/test/CodeGen/X86/min-legal-vector-width.ll b/llvm/test/CodeGen/X86/min-legal-vector-width.ll
index b28a3f821141a..a953c505cd8ee 100644
--- a/llvm/test/CodeGen/X86/min-legal-vector-width.ll
+++ b/llvm/test/CodeGen/X86/min-legal-vector-width.ll
@@ -12,7 +12,7 @@
 
 ; This file primarily contains tests for specific places in X86ISelLowering.cpp that needed to be made aware of the legalizer not allowing 512-bit vectors due to prefer-256-bit even though AVX512 is enabled.
 
-define dso_local void @add256(<16 x i32>* %a, <16 x i32>* %b, <16 x i32>* %c) "min-legal-vector-width"="256" {
+define dso_local void @add256(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: add256:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
@@ -23,14 +23,14 @@ define dso_local void @add256(<16 x i32>* %a, <16 x i32>* %b, <16 x i32>* %c) "m
 ; CHECK-NEXT:    vmovdqa %ymm1, 32(%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %d = load <16 x i32>, <16 x i32>* %a
-  %e = load <16 x i32>, <16 x i32>* %b
+  %d = load <16 x i32>, ptr %a
+  %e = load <16 x i32>, ptr %b
   %f = add <16 x i32> %d, %e
-  store <16 x i32> %f, <16 x i32>* %c
+  store <16 x i32> %f, ptr %c
   ret void
 }
 
-define dso_local void @add512(<16 x i32>* %a, <16 x i32>* %b, <16 x i32>* %c) "min-legal-vector-width"="512" {
+define dso_local void @add512(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"="512" {
 ; CHECK-LABEL: add512:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0
@@ -38,14 +38,14 @@ define dso_local void @add512(<16 x i32>* %a, <16 x i32>* %b, <16 x i32>* %c) "m
 ; CHECK-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %d = load <16 x i32>, <16 x i32>* %a
-  %e = load <16 x i32>, <16 x i32>* %b
+  %d = load <16 x i32>, ptr %a
+  %e = load <16 x i32>, ptr %b
   %f = add <16 x i32> %d, %e
-  store <16 x i32> %f, <16 x i32>* %c
+  store <16 x i32> %f, ptr %c
   ret void
 }
 
-define dso_local void @avg_v64i8_256(<64 x i8>* %a, <64 x i8>* %b) "min-legal-vector-width"="256" {
+define dso_local void @avg_v64i8_256(ptr %a, ptr %b) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: avg_v64i8_256:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rsi), %ymm0
@@ -56,20 +56,20 @@ define dso_local void @avg_v64i8_256(<64 x i8>* %a, <64 x i8>* %b) "min-legal-ve
 ; CHECK-NEXT:    vmovdqu %ymm0, (%rax)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %1 = load <64 x i8>, <64 x i8>* %a
-  %2 = load <64 x i8>, <64 x i8>* %b
+  %1 = load <64 x i8>, ptr %a
+  %2 = load <64 x i8>, ptr %b
   %3 = zext <64 x i8> %1 to <64 x i32>
   %4 = zext <64 x i8> %2 to <64 x i32>
   %5 = add nuw nsw <64 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %6 = add nuw nsw <64 x i32> %5, %4
   %7 = lshr <64 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %8 = trunc <64 x i32> %7 to <64 x i8>
-  store <64 x i8> %8, <64 x i8>* undef, align 4
+  store <64 x i8> %8, ptr undef, align 4
   ret void
 }
 
 
-define dso_local void @avg_v64i8_512(<64 x i8>* %a, <64 x i8>* %b) "min-legal-vector-width"="512" {
+define dso_local void @avg_v64i8_512(ptr %a, ptr %b) "min-legal-vector-width"="512" {
 ; CHECK-LABEL: avg_v64i8_512:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0
@@ -77,19 +77,19 @@ define dso_local void @avg_v64i8_512(<64 x i8>* %a, <64 x i8>* %b) "min-legal-ve
 ; CHECK-NEXT:    vmovdqu64 %zmm0, (%rax)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %1 = load <64 x i8>, <64 x i8>* %a
-  %2 = load <64 x i8>, <64 x i8>* %b
+  %1 = load <64 x i8>, ptr %a
+  %2 = load <64 x i8>, ptr %b
   %3 = zext <64 x i8> %1 to <64 x i32>
   %4 = zext <64 x i8> %2 to <64 x i32>
   %5 = add nuw nsw <64 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %6 = add nuw nsw <64 x i32> %5, %4
   %7 = lshr <64 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %8 = trunc <64 x i32> %7 to <64 x i8>
-  store <64 x i8> %8, <64 x i8>* undef, align 4
+  store <64 x i8> %8, ptr undef, align 4
   ret void
 }
 
-define dso_local void @pmaddwd_32_256(<32 x i16>* %APtr, <32 x i16>* %BPtr, <16 x i32>* %CPtr) "min-legal-vector-width"="256" {
+define dso_local void @pmaddwd_32_256(ptr %APtr, ptr %BPtr, ptr %CPtr) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: pmaddwd_32_256:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
@@ -100,19 +100,19 @@ define dso_local void @pmaddwd_32_256(<32 x i16>* %APtr, <32 x i16>* %BPtr, <16
 ; CHECK-NEXT:    vmovdqa %ymm1, 32(%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-   %A = load <32 x i16>, <32 x i16>* %APtr
-   %B = load <32 x i16>, <32 x i16>* %BPtr
+   %A = load <32 x i16>, ptr %APtr
+   %B = load <32 x i16>, ptr %BPtr
    %a = sext <32 x i16> %A to <32 x i32>
    %b = sext <32 x i16> %B to <32 x i32>
    %m = mul nsw <32 x i32> %a, %b
    %odd = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
    %even = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
    %ret = add <16 x i32> %odd, %even
-   store <16 x i32> %ret, <16 x i32>* %CPtr
+   store <16 x i32> %ret, ptr %CPtr
    ret void
 }
 
-define dso_local void @pmaddwd_32_512(<32 x i16>* %APtr, <32 x i16>* %BPtr, <16 x i32>* %CPtr) "min-legal-vector-width"="512" {
+define dso_local void @pmaddwd_32_512(ptr %APtr, ptr %BPtr, ptr %CPtr) "min-legal-vector-width"="512" {
 ; CHECK-LABEL: pmaddwd_32_512:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0
@@ -120,19 +120,19 @@ define dso_local void @pmaddwd_32_512(<32 x i16>* %APtr, <32 x i16>* %BPtr, <16
 ; CHECK-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-   %A = load <32 x i16>, <32 x i16>* %APtr
-   %B = load <32 x i16>, <32 x i16>* %BPtr
+   %A = load <32 x i16>, ptr %APtr
+   %B = load <32 x i16>, ptr %BPtr
    %a = sext <32 x i16> %A to <32 x i32>
    %b = sext <32 x i16> %B to <32 x i32>
    %m = mul nsw <32 x i32> %a, %b
    %odd = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
    %even = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
    %ret = add <16 x i32> %odd, %even
-   store <16 x i32> %ret, <16 x i32>* %CPtr
+   store <16 x i32> %ret, ptr %CPtr
    ret void
 }
 
-define dso_local void @psubus_64i8_max_256(<64 x i8>* %xptr, <64 x i8>* %yptr, <64 x i8>* %zptr) "min-legal-vector-width"="256" {
+define dso_local void @psubus_64i8_max_256(ptr %xptr, ptr %yptr, ptr %zptr) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: psubus_64i8_max_256:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
@@ -143,16 +143,16 @@ define dso_local void @psubus_64i8_max_256(<64 x i8>* %xptr, <64 x i8>* %yptr, <
 ; CHECK-NEXT:    vmovdqa %ymm1, 32(%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = load <64 x i8>, <64 x i8>* %xptr
-  %y = load <64 x i8>, <64 x i8>* %yptr
+  %x = load <64 x i8>, ptr %xptr
+  %y = load <64 x i8>, ptr %yptr
   %cmp = icmp ult <64 x i8> %x, %y
   %max = select <64 x i1> %cmp, <64 x i8> %y, <64 x i8> %x
   %res = sub <64 x i8> %max, %y
-  store <64 x i8> %res, <64 x i8>* %zptr
+  store <64 x i8> %res, ptr %zptr
   ret void
 }
 
-define dso_local void @psubus_64i8_max_512(<64 x i8>* %xptr, <64 x i8>* %yptr, <64 x i8>* %zptr) "min-legal-vector-width"="512" {
+define dso_local void @psubus_64i8_max_512(ptr %xptr, ptr %yptr, ptr %zptr) "min-legal-vector-width"="512" {
 ; CHECK-LABEL: psubus_64i8_max_512:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0
@@ -160,16 +160,16 @@ define dso_local void @psubus_64i8_max_512(<64 x i8>* %xptr, <64 x i8>* %yptr, <
 ; CHECK-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = load <64 x i8>, <64 x i8>* %xptr
-  %y = load <64 x i8>, <64 x i8>* %yptr
+  %x = load <64 x i8>, ptr %xptr
+  %y = load <64 x i8>, ptr %yptr
   %cmp = icmp ult <64 x i8> %x, %y
   %max = select <64 x i1> %cmp, <64 x i8> %y, <64 x i8> %x
   %res = sub <64 x i8> %max, %y
-  store <64 x i8> %res, <64 x i8>* %zptr
+  store <64 x i8> %res, ptr %zptr
   ret void
 }
 
-define dso_local i32 @_Z9test_charPcS_i_256(i8* nocapture readonly, i8* nocapture readonly, i32) "min-legal-vector-width"="256" {
+define dso_local i32 @_Z9test_charPcS_i_256(ptr nocapture readonly, ptr nocapture readonly, i32) "min-legal-vector-width"="256" {
 ; CHECK-SKX-LABEL: _Z9test_charPcS_i_256:
 ; CHECK-SKX:       # %bb.0: # %entry
 ; CHECK-SKX-NEXT:    movl %edx, %eax
@@ -281,13 +281,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <32 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i8, i8* %0, i64 %index
-  %5 = bitcast i8* %4 to <32 x i8>*
-  %wide.load = load <32 x i8>, <32 x i8>* %5, align 1
+  %4 = getelementptr inbounds i8, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <32 x i8>, ptr %5, align 1
   %6 = sext <32 x i8> %wide.load to <32 x i32>
-  %7 = getelementptr inbounds i8, i8* %1, i64 %index
-  %8 = bitcast i8* %7 to <32 x i8>*
-  %wide.load14 = load <32 x i8>, <32 x i8>* %8, align 1
+  %7 = getelementptr inbounds i8, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <32 x i8>, ptr %8, align 1
   %9 = sext <32 x i8> %wide.load14 to <32 x i32>
   %10 = mul nsw <32 x i32> %9, %6
   %11 = add nsw <32 x i32> %10, %vec.phi
@@ -310,7 +310,7 @@ middle.block:
   ret i32 %13
 }
 
-define dso_local i32 @_Z9test_charPcS_i_512(i8* nocapture readonly, i8* nocapture readonly, i32) "min-legal-vector-width"="512" {
+define dso_local i32 @_Z9test_charPcS_i_512(ptr nocapture readonly, ptr nocapture readonly, i32) "min-legal-vector-width"="512" {
 ; CHECK-SKX-LABEL: _Z9test_charPcS_i_512:
 ; CHECK-SKX:       # %bb.0: # %entry
 ; CHECK-SKX-NEXT:    movl %edx, %eax
@@ -407,13 +407,13 @@ entry:
 vector.body:
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
   %vec.phi = phi <32 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
-  %4 = getelementptr inbounds i8, i8* %0, i64 %index
-  %5 = bitcast i8* %4 to <32 x i8>*
-  %wide.load = load <32 x i8>, <32 x i8>* %5, align 1
+  %4 = getelementptr inbounds i8, ptr %0, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load = load <32 x i8>, ptr %5, align 1
   %6 = sext <32 x i8> %wide.load to <32 x i32>
-  %7 = getelementptr inbounds i8, i8* %1, i64 %index
-  %8 = bitcast i8* %7 to <32 x i8>*
-  %wide.load14 = load <32 x i8>, <32 x i8>* %8, align 1
+  %7 = getelementptr inbounds i8, ptr %1, i64 %index
+  %8 = bitcast ptr %7 to ptr
+  %wide.load14 = load <32 x i8>, ptr %8, align 1
   %9 = sext <32 x i8> %wide.load14 to <32 x i32>
   %10 = mul nsw <32 x i32> %9, %6
   %11 = add nsw <32 x i32> %10, %vec.phi
@@ -520,13 +520,13 @@ entry:
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %vec.phi = phi <16 x i32> [ zeroinitializer, %entry ], [ %10, %vector.body ]
-  %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 4
+  %0 = getelementptr inbounds [1024 x i8], ptr @a, i64 0, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <16 x i8>, ptr %1, align 4
   %2 = zext <16 x i8> %wide.load to <16 x i32>
-  %3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load1 = load <16 x i8>, <16 x i8>* %4, align 4
+  %3 = getelementptr inbounds [1024 x i8], ptr @b, i64 0, i64 %index
+  %4 = bitcast ptr %3 to ptr
+  %wide.load1 = load <16 x i8>, ptr %4, align 4
   %5 = zext <16 x i8> %wide.load1 to <16 x i32>
   %6 = sub nsw <16 x i32> %2, %5
   %7 = icmp sgt <16 x i32> %6, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -631,13 +631,13 @@ entry:
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %vec.phi = phi <16 x i32> [ zeroinitializer, %entry ], [ %10, %vector.body ]
-  %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 4
+  %0 = getelementptr inbounds [1024 x i8], ptr @a, i64 0, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <16 x i8>, ptr %1, align 4
   %2 = zext <16 x i8> %wide.load to <16 x i32>
-  %3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load1 = load <16 x i8>, <16 x i8>* %4, align 4
+  %3 = getelementptr inbounds [1024 x i8], ptr @b, i64 0, i64 %index
+  %4 = bitcast ptr %3 to ptr
+  %wide.load1 = load <16 x i8>, ptr %4, align 4
   %5 = zext <16 x i8> %wide.load1 to <16 x i32>
   %6 = sub nsw <16 x i32> %2, %5
   %7 = icmp sgt <16 x i32> %6, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -661,7 +661,7 @@ middle.block:
   ret i32 %12
 }
 
-define dso_local void @sbto16f32_256(<16 x i16> %a, <16 x float>* %res) "min-legal-vector-width"="256" {
+define dso_local void @sbto16f32_256(<16 x i16> %a, ptr %res) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: sbto16f32_256:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovw2m %ymm0, %k0
@@ -676,11 +676,11 @@ define dso_local void @sbto16f32_256(<16 x i16> %a, <16 x float>* %res) "min-leg
 ; CHECK-NEXT:    retq
   %mask = icmp slt <16 x i16> %a, zeroinitializer
   %1 = sitofp <16 x i1> %mask to <16 x float>
-  store <16 x float> %1, <16 x float>* %res
+  store <16 x float> %1, ptr %res
   ret void
 }
 
-define dso_local void @sbto16f32_512(<16 x i16> %a, <16 x float>* %res) "min-legal-vector-width"="512" {
+define dso_local void @sbto16f32_512(<16 x i16> %a, ptr %res) "min-legal-vector-width"="512" {
 ; CHECK-LABEL: sbto16f32_512:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovw2m %ymm0, %k0
@@ -691,11 +691,11 @@ define dso_local void @sbto16f32_512(<16 x i16> %a, <16 x float>* %res) "min-leg
 ; CHECK-NEXT:    retq
   %mask = icmp slt <16 x i16> %a, zeroinitializer
   %1 = sitofp <16 x i1> %mask to <16 x float>
-  store <16 x float> %1, <16 x float>* %res
+  store <16 x float> %1, ptr %res
   ret void
 }
 
-define dso_local void @sbto16f64_256(<16 x i16> %a, <16 x double>* %res)  "min-legal-vector-width"="256" {
+define dso_local void @sbto16f64_256(<16 x i16> %a, ptr %res)  "min-legal-vector-width"="256" {
 ; CHECK-LABEL: sbto16f64_256:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovw2m %ymm0, %k0
@@ -716,11 +716,11 @@ define dso_local void @sbto16f64_256(<16 x i16> %a, <16 x double>* %res)  "min-l
 ; CHECK-NEXT:    retq
   %mask = icmp slt <16 x i16> %a, zeroinitializer
   %1 = sitofp <16 x i1> %mask to <16 x double>
-  store <16 x double> %1, <16 x double>* %res
+  store <16 x double> %1, ptr %res
   ret void
 }
 
-define dso_local void @sbto16f64_512(<16 x i16> %a, <16 x double>* %res)  "min-legal-vector-width"="512" {
+define dso_local void @sbto16f64_512(<16 x i16> %a, ptr %res)  "min-legal-vector-width"="512" {
 ; CHECK-LABEL: sbto16f64_512:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovw2m %ymm0, %k0
@@ -734,11 +734,11 @@ define dso_local void @sbto16f64_512(<16 x i16> %a, <16 x double>* %res)  "min-l
 ; CHECK-NEXT:    retq
   %mask = icmp slt <16 x i16> %a, zeroinitializer
   %1 = sitofp <16 x i1> %mask to <16 x double>
-  store <16 x double> %1, <16 x double>* %res
+  store <16 x double> %1, ptr %res
   ret void
 }
 
-define dso_local void @ubto16f32_256(<16 x i16> %a, <16 x float>* %res) "min-legal-vector-width"="256" {
+define dso_local void @ubto16f32_256(<16 x i16> %a, ptr %res) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: ubto16f32_256:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovw2m %ymm0, %k0
@@ -755,11 +755,11 @@ define dso_local void @ubto16f32_256(<16 x i16> %a, <16 x float>* %res) "min-leg
 ; CHECK-NEXT:    retq
   %mask = icmp slt <16 x i16> %a, zeroinitializer
   %1 = uitofp <16 x i1> %mask to <16 x float>
-  store <16 x float> %1, <16 x float>* %res
+  store <16 x float> %1, ptr %res
   ret void
 }
 
-define dso_local void @ubto16f32_512(<16 x i16> %a, <16 x float>* %res) "min-legal-vector-width"="512" {
+define dso_local void @ubto16f32_512(<16 x i16> %a, ptr %res) "min-legal-vector-width"="512" {
 ; CHECK-LABEL: ubto16f32_512:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovw2m %ymm0, %k0
@@ -771,11 +771,11 @@ define dso_local void @ubto16f32_512(<16 x i16> %a, <16 x float>* %res) "min-leg
 ; CHECK-NEXT:    retq
   %mask = icmp slt <16 x i16> %a, zeroinitializer
   %1 = uitofp <16 x i1> %mask to <16 x float>
-  store <16 x float> %1, <16 x float>* %res
+  store <16 x float> %1, ptr %res
   ret void
 }
 
-define dso_local void @ubto16f64_256(<16 x i16> %a, <16 x double>* %res) "min-legal-vector-width"="256" {
+define dso_local void @ubto16f64_256(<16 x i16> %a, ptr %res) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: ubto16f64_256:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovw2m %ymm0, %k0
@@ -798,11 +798,11 @@ define dso_local void @ubto16f64_256(<16 x i16> %a, <16 x double>* %res) "min-le
 ; CHECK-NEXT:    retq
   %mask = icmp slt <16 x i16> %a, zeroinitializer
   %1 = uitofp <16 x i1> %mask to <16 x double>
-  store <16 x double> %1, <16 x double>* %res
+  store <16 x double> %1, ptr %res
   ret void
 }
 
-define dso_local void @ubto16f64_512(<16 x i16> %a, <16 x double>* %res) "min-legal-vector-width"="512" {
+define dso_local void @ubto16f64_512(<16 x i16> %a, ptr %res) "min-legal-vector-width"="512" {
 ; CHECK-LABEL: ubto16f64_512:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovw2m %ymm0, %k0
@@ -817,11 +817,11 @@ define dso_local void @ubto16f64_512(<16 x i16> %a, <16 x double>* %res) "min-le
 ; CHECK-NEXT:    retq
   %mask = icmp slt <16 x i16> %a, zeroinitializer
   %1 = uitofp <16 x i1> %mask to <16 x double>
-  store <16 x double> %1, <16 x double>* %res
+  store <16 x double> %1, ptr %res
   ret void
 }
 
-define <16 x i16> @test_16f32toub_256(<16 x float>* %ptr, <16 x i16> %passthru) "min-legal-vector-width"="256" {
+define <16 x i16> @test_16f32toub_256(ptr %ptr, <16 x i16> %passthru) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: test_16f32toub_256:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttps2dq (%rdi), %ymm1
@@ -833,13 +833,13 @@ define <16 x i16> @test_16f32toub_256(<16 x float>* %ptr, <16 x i16> %passthru)
 ; CHECK-NEXT:    kunpckbw %k0, %k1, %k1
 ; CHECK-NEXT:    vmovdqu16 %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
-  %a = load <16 x float>, <16 x float>* %ptr
+  %a = load <16 x float>, ptr %ptr
   %mask = fptoui <16 x float> %a to <16 x i1>
   %select = select <16 x i1> %mask, <16 x i16> %passthru, <16 x i16> zeroinitializer
   ret <16 x i16> %select
 }
 
-define <16 x i16> @test_16f32toub_512(<16 x float>* %ptr, <16 x i16> %passthru) "min-legal-vector-width"="512" {
+define <16 x i16> @test_16f32toub_512(ptr %ptr, <16 x i16> %passthru) "min-legal-vector-width"="512" {
 ; CHECK-LABEL: test_16f32toub_512:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttps2dq (%rdi), %zmm1
@@ -847,13 +847,13 @@ define <16 x i16> @test_16f32toub_512(<16 x float>* %ptr, <16 x i16> %passthru)
 ; CHECK-NEXT:    vpmovd2m %zmm1, %k1
 ; CHECK-NEXT:    vmovdqu16 %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
-  %a = load <16 x float>, <16 x float>* %ptr
+  %a = load <16 x float>, ptr %ptr
   %mask = fptoui <16 x float> %a to <16 x i1>
   %select = select <16 x i1> %mask, <16 x i16> %passthru, <16 x i16> zeroinitializer
   ret <16 x i16> %select
 }
 
-define <16 x i16> @test_16f32tosb_256(<16 x float>* %ptr, <16 x i16> %passthru) "min-legal-vector-width"="256" {
+define <16 x i16> @test_16f32tosb_256(ptr %ptr, <16 x i16> %passthru) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: test_16f32tosb_256:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttps2dq (%rdi), %ymm1
@@ -863,26 +863,26 @@ define <16 x i16> @test_16f32tosb_256(<16 x float>* %ptr, <16 x i16> %passthru)
 ; CHECK-NEXT:    kunpckbw %k0, %k1, %k1
 ; CHECK-NEXT:    vmovdqu16 %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
-  %a = load <16 x float>, <16 x float>* %ptr
+  %a = load <16 x float>, ptr %ptr
   %mask = fptosi <16 x float> %a to <16 x i1>
   %select = select <16 x i1> %mask, <16 x i16> %passthru, <16 x i16> zeroinitializer
   ret <16 x i16> %select
 }
 
-define <16 x i16> @test_16f32tosb_512(<16 x float>* %ptr, <16 x i16> %passthru) "min-legal-vector-width"="512" {
+define <16 x i16> @test_16f32tosb_512(ptr %ptr, <16 x i16> %passthru) "min-legal-vector-width"="512" {
 ; CHECK-LABEL: test_16f32tosb_512:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttps2dq (%rdi), %zmm1
 ; CHECK-NEXT:    vpmovd2m %zmm1, %k1
 ; CHECK-NEXT:    vmovdqu16 %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    retq
-  %a = load <16 x float>, <16 x float>* %ptr
+  %a = load <16 x float>, ptr %ptr
   %mask = fptosi <16 x float> %a to <16 x i1>
   %select = select <16 x i1> %mask, <16 x i16> %passthru, <16 x i16> zeroinitializer
   ret <16 x i16> %select
 }
 
-define dso_local void @mul256(<64 x i8>* %a, <64 x i8>* %b, <64 x i8>* %c) "min-legal-vector-width"="256" {
+define dso_local void @mul256(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"="256" {
 ; CHECK-SKX-VBMI-LABEL: mul256:
 ; CHECK-SKX-VBMI:       # %bb.0:
 ; CHECK-SKX-VBMI-NEXT:    vmovdqa (%rdi), %ymm0
@@ -964,14 +964,14 @@ define dso_local void @mul256(<64 x i8>* %a, <64 x i8>* %b, <64 x i8>* %c) "min-
 ; CHECK-VBMI-NEXT:    vmovdqa %ymm1, 32(%rdx)
 ; CHECK-VBMI-NEXT:    vzeroupper
 ; CHECK-VBMI-NEXT:    retq
-  %d = load <64 x i8>, <64 x i8>* %a
-  %e = load <64 x i8>, <64 x i8>* %b
+  %d = load <64 x i8>, ptr %a
+  %e = load <64 x i8>, ptr %b
   %f = mul <64 x i8> %d, %e
-  store <64 x i8> %f, <64 x i8>* %c
+  store <64 x i8> %f, ptr %c
   ret void
 }
 
-define dso_local void @mul512(<64 x i8>* %a, <64 x i8>* %b, <64 x i8>* %c) "min-legal-vector-width"="512" {
+define dso_local void @mul512(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"="512" {
 ; CHECK-SKX-VBMI-LABEL: mul512:
 ; CHECK-SKX-VBMI:       # %bb.0:
 ; CHECK-SKX-VBMI-NEXT:    vmovdqa64 (%rdi), %zmm0
@@ -1021,27 +1021,27 @@ define dso_local void @mul512(<64 x i8>* %a, <64 x i8>* %b, <64 x i8>* %c) "min-
 ; CHECK-VBMI-NEXT:    vmovdqa64 %zmm1, (%rdx)
 ; CHECK-VBMI-NEXT:    vzeroupper
 ; CHECK-VBMI-NEXT:    retq
-  %d = load <64 x i8>, <64 x i8>* %a
-  %e = load <64 x i8>, <64 x i8>* %b
+  %d = load <64 x i8>, ptr %a
+  %e = load <64 x i8>, ptr %b
   %f = mul <64 x i8> %d, %e
-  store <64 x i8> %f, <64 x i8>* %c
+  store <64 x i8> %f, ptr %c
   ret void
 }
 
 ; This threw an assertion at one point.
-define <4 x i32> @mload_v4i32(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) "min-legal-vector-width"="256" {
+define <4 x i32> @mload_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %dst) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: mload_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vptestnmd %xmm0, %xmm0, %k1
 ; CHECK-NEXT:    vpblendmd (%rdi), %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    retq
   %mask = icmp eq <4 x i32> %trigger, zeroinitializer
-  %res = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr, i32 4, <4 x i1> %mask, <4 x i32> %dst)
+  %res = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %addr, i32 4, <4 x i1> %mask, <4 x i32> %dst)
   ret <4 x i32> %res
 }
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32, <4 x i1>, <4 x i32>)
 
-define <16 x i32> @trunc_v16i64_v16i32(<16 x i64>* %x) nounwind "min-legal-vector-width"="256" {
+define <16 x i32> @trunc_v16i64_v16i32(ptr %x) nounwind "min-legal-vector-width"="256" {
 ; CHECK-LABEL: trunc_v16i64_v16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
@@ -1055,12 +1055,12 @@ define <16 x i32> @trunc_v16i64_v16i32(<16 x i64>* %x) nounwind "min-legal-vecto
 ; CHECK-NEXT:    vpmovqd %ymm3, %xmm2
 ; CHECK-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
 ; CHECK-NEXT:    retq
-  %a = load <16 x i64>, <16 x i64>* %x
+  %a = load <16 x i64>, ptr %x
   %b = trunc <16 x i64> %a to <16 x i32>
   ret <16 x i32> %b
 }
 
-define <16 x i8> @trunc_v16i64_v16i8(<16 x i64>* %x) nounwind "min-legal-vector-width"="256" {
+define <16 x i8> @trunc_v16i64_v16i8(ptr %x) nounwind "min-legal-vector-width"="256" {
 ; CHECK-LABEL: trunc_v16i64_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
@@ -1076,12 +1076,12 @@ define <16 x i8> @trunc_v16i64_v16i8(<16 x i64>* %x) nounwind "min-legal-vector-
 ; CHECK-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %a = load <16 x i64>, <16 x i64>* %x
+  %a = load <16 x i64>, ptr %x
   %b = trunc <16 x i64> %a to <16 x i8>
   ret <16 x i8> %b
 }
 
-define <16 x i8> @trunc_v16i32_v16i8(<16 x i32>* %x) nounwind "min-legal-vector-width"="256" {
+define <16 x i8> @trunc_v16i32_v16i8(ptr %x) nounwind "min-legal-vector-width"="256" {
 ; CHECK-LABEL: trunc_v16i32_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
@@ -1091,12 +1091,12 @@ define <16 x i8> @trunc_v16i32_v16i8(<16 x i32>* %x) nounwind "min-legal-vector-
 ; CHECK-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %a = load <16 x i32>, <16 x i32>* %x
+  %a = load <16 x i32>, ptr %x
   %b = trunc <16 x i32> %a to <16 x i8>
   ret <16 x i8> %b
 }
 
-define <8 x i8> @trunc_v8i64_v8i8(<8 x i64>* %x) nounwind "min-legal-vector-width"="256" {
+define <8 x i8> @trunc_v8i64_v8i8(ptr %x) nounwind "min-legal-vector-width"="256" {
 ; CHECK-LABEL: trunc_v8i64_v8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
@@ -1106,12 +1106,12 @@ define <8 x i8> @trunc_v8i64_v8i8(<8 x i64>* %x) nounwind "min-legal-vector-widt
 ; CHECK-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %a = load <8 x i64>, <8 x i64>* %x
+  %a = load <8 x i64>, ptr %x
   %b = trunc <8 x i64> %a to <8 x i8>
   ret <8 x i8> %b
 }
 
-define <8 x i16> @trunc_v8i64_v8i16(<8 x i64>* %x) nounwind "min-legal-vector-width"="256" {
+define <8 x i16> @trunc_v8i64_v8i16(ptr %x) nounwind "min-legal-vector-width"="256" {
 ; CHECK-LABEL: trunc_v8i64_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
@@ -1121,12 +1121,12 @@ define <8 x i16> @trunc_v8i64_v8i16(<8 x i64>* %x) nounwind "min-legal-vector-wi
 ; CHECK-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %a = load <8 x i64>, <8 x i64>* %x
+  %a = load <8 x i64>, ptr %x
   %b = trunc <8 x i64> %a to <8 x i16>
   ret <8 x i16> %b
 }
 
-define <8 x i32> @trunc_v8i64_v8i32_zeroes(<8 x i64>* %x) nounwind "min-legal-vector-width"="256" {
+define <8 x i32> @trunc_v8i64_v8i32_zeroes(ptr %x) nounwind "min-legal-vector-width"="256" {
 ; CHECK-LABEL: trunc_v8i64_v8i32_zeroes:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrlq $48, 32(%rdi), %ymm0
@@ -1134,26 +1134,26 @@ define <8 x i32> @trunc_v8i64_v8i32_zeroes(<8 x i64>* %x) nounwind "min-legal-ve
 ; CHECK-NEXT:    vpackusdw %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
 ; CHECK-NEXT:    retq
-  %a = load <8 x i64>, <8 x i64>* %x
+  %a = load <8 x i64>, ptr %x
   %b = lshr <8 x i64> %a, <i64 48, i64 48, i64 48, i64 48, i64 48, i64 48, i64 48, i64 48>
   %c = trunc <8 x i64> %b to <8 x i32>
   ret <8 x i32> %c
 }
 
-define <16 x i16> @trunc_v16i32_v16i16_zeroes(<16 x i32>* %x) nounwind "min-legal-vector-width"="256" {
+define <16 x i16> @trunc_v16i32_v16i16_zeroes(ptr %x) nounwind "min-legal-vector-width"="256" {
 ; CHECK-LABEL: trunc_v16i32_v16i16_zeroes:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rdi), %ymm1
 ; CHECK-NEXT:    vpmovsxbw {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]
 ; CHECK-NEXT:    vpermi2w 32(%rdi), %ymm1, %ymm0
 ; CHECK-NEXT:    retq
-  %a = load <16 x i32>, <16 x i32>* %x
+  %a = load <16 x i32>, ptr %x
   %b = lshr <16 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
   %c = trunc <16 x i32> %b to <16 x i16>
   ret <16 x i16> %c
 }
 
-define <32 x i8> @trunc_v32i16_v32i8_zeroes(<32 x i16>* %x) nounwind "min-legal-vector-width"="256" {
+define <32 x i8> @trunc_v32i16_v32i8_zeroes(ptr %x) nounwind "min-legal-vector-width"="256" {
 ; CHECK-SKX-VBMI-LABEL: trunc_v32i16_v32i8_zeroes:
 ; CHECK-SKX-VBMI:       # %bb.0:
 ; CHECK-SKX-VBMI-NEXT:    vmovdqa (%rdi), %ymm1
@@ -1175,13 +1175,13 @@ define <32 x i8> @trunc_v32i16_v32i8_zeroes(<32 x i16>* %x) nounwind "min-legal-
 ; CHECK-VBMI-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47,49,51,53,55,57,59,61,63]
 ; CHECK-VBMI-NEXT:    vpermi2b 32(%rdi), %ymm1, %ymm0
 ; CHECK-VBMI-NEXT:    retq
-  %a = load <32 x i16>, <32 x i16>* %x
+  %a = load <32 x i16>, ptr %x
   %b = lshr <32 x i16> %a, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
   %c = trunc <32 x i16> %b to <32 x i8>
   ret <32 x i8> %c
 }
 
-define <8 x i32> @trunc_v8i64_v8i32_sign(<8 x i64>* %x) nounwind "min-legal-vector-width"="256" {
+define <8 x i32> @trunc_v8i64_v8i32_sign(ptr %x) nounwind "min-legal-vector-width"="256" {
 ; CHECK-LABEL: trunc_v8i64_v8i32_sign:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsraq $48, 32(%rdi), %ymm0
@@ -1189,26 +1189,26 @@ define <8 x i32> @trunc_v8i64_v8i32_sign(<8 x i64>* %x) nounwind "min-legal-vect
 ; CHECK-NEXT:    vpackssdw %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
 ; CHECK-NEXT:    retq
-  %a = load <8 x i64>, <8 x i64>* %x
+  %a = load <8 x i64>, ptr %x
   %b = ashr <8 x i64> %a, <i64 48, i64 48, i64 48, i64 48, i64 48, i64 48, i64 48, i64 48>
   %c = trunc <8 x i64> %b to <8 x i32>
   ret <8 x i32> %c
 }
 
-define <16 x i16> @trunc_v16i32_v16i16_sign(<16 x i32>* %x) nounwind "min-legal-vector-width"="256" {
+define <16 x i16> @trunc_v16i32_v16i16_sign(ptr %x) nounwind "min-legal-vector-width"="256" {
 ; CHECK-LABEL: trunc_v16i32_v16i16_sign:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rdi), %ymm1
 ; CHECK-NEXT:    vpmovsxbw {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]
 ; CHECK-NEXT:    vpermi2w 32(%rdi), %ymm1, %ymm0
 ; CHECK-NEXT:    retq
-  %a = load <16 x i32>, <16 x i32>* %x
+  %a = load <16 x i32>, ptr %x
   %b = ashr <16 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
   %c = trunc <16 x i32> %b to <16 x i16>
   ret <16 x i16> %c
 }
 
-define <32 x i8> @trunc_v32i16_v32i8_sign(<32 x i16>* %x) nounwind "min-legal-vector-width"="256" {
+define <32 x i8> @trunc_v32i16_v32i8_sign(ptr %x) nounwind "min-legal-vector-width"="256" {
 ; CHECK-SKX-VBMI-LABEL: trunc_v32i16_v32i8_sign:
 ; CHECK-SKX-VBMI:       # %bb.0:
 ; CHECK-SKX-VBMI-NEXT:    vmovdqa (%rdi), %ymm1
@@ -1230,13 +1230,13 @@ define <32 x i8> @trunc_v32i16_v32i8_sign(<32 x i16>* %x) nounwind "min-legal-ve
 ; CHECK-VBMI-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47,49,51,53,55,57,59,61,63]
 ; CHECK-VBMI-NEXT:    vpermi2b 32(%rdi), %ymm1, %ymm0
 ; CHECK-VBMI-NEXT:    retq
-  %a = load <32 x i16>, <32 x i16>* %x
+  %a = load <32 x i16>, ptr %x
   %b = ashr <32 x i16> %a, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
   %c = trunc <32 x i16> %b to <32 x i8>
   ret <32 x i8> %c
 }
 
-define dso_local void @zext_v16i8_v16i64(<16 x i8> %x, <16 x i64>* %y) nounwind "min-legal-vector-width"="256" {
+define dso_local void @zext_v16i8_v16i64(<16 x i8> %x, ptr %y) nounwind "min-legal-vector-width"="256" {
 ; CHECK-LABEL: zext_v16i8_v16i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -1254,11 +1254,11 @@ define dso_local void @zext_v16i8_v16i64(<16 x i8> %x, <16 x i64>* %y) nounwind
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
   %a = zext <16 x i8> %x to <16 x i64>
-  store <16 x i64> %a, <16 x i64>* %y
+  store <16 x i64> %a, ptr %y
   ret void
 }
 
-define dso_local void @sext_v16i8_v16i64(<16 x i8> %x, <16 x i64>* %y) nounwind "min-legal-vector-width"="256" {
+define dso_local void @sext_v16i8_v16i64(<16 x i8> %x, ptr %y) nounwind "min-legal-vector-width"="256" {
 ; CHECK-LABEL: sext_v16i8_v16i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpmovsxbw %xmm0, %ymm1
@@ -1276,11 +1276,11 @@ define dso_local void @sext_v16i8_v16i64(<16 x i8> %x, <16 x i64>* %y) nounwind
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
   %a = sext <16 x i8> %x to <16 x i64>
-  store <16 x i64> %a, <16 x i64>* %y
+  store <16 x i64> %a, ptr %y
   ret void
 }
 
-define dso_local void @vselect_split_v8i16_setcc(<8 x i16> %s, <8 x i16> %t, <8 x i64>* %p, <8 x i64>* %q, <8 x i64>* %r) "min-legal-vector-width"="256" {
+define dso_local void @vselect_split_v8i16_setcc(<8 x i16> %s, <8 x i16> %t, ptr %p, ptr %q, ptr %r) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: vselect_split_v8i16_setcc:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rsi), %ymm2
@@ -1293,15 +1293,15 @@ define dso_local void @vselect_split_v8i16_setcc(<8 x i16> %s, <8 x i16> %t, <8
 ; CHECK-NEXT:    vmovdqa %ymm3, 32(%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = load <8 x i64>, <8 x i64>* %p
-  %y = load <8 x i64>, <8 x i64>* %q
+  %x = load <8 x i64>, ptr %p
+  %y = load <8 x i64>, ptr %q
   %a = icmp eq <8 x i16> %s, %t
   %b = select <8 x i1> %a, <8 x i64> %x, <8 x i64> %y
-  store <8 x i64> %b, <8 x i64>* %r
+  store <8 x i64> %b, ptr %r
   ret void
 }
 
-define dso_local void @vselect_split_v8i32_setcc(<8 x i32> %s, <8 x i32> %t, <8 x i64>* %p, <8 x i64>* %q, <8 x i64>* %r) "min-legal-vector-width"="256" {
+define dso_local void @vselect_split_v8i32_setcc(<8 x i32> %s, <8 x i32> %t, ptr %p, ptr %q, ptr %r) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: vselect_split_v8i32_setcc:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rsi), %ymm2
@@ -1314,15 +1314,15 @@ define dso_local void @vselect_split_v8i32_setcc(<8 x i32> %s, <8 x i32> %t, <8
 ; CHECK-NEXT:    vmovdqa %ymm3, 32(%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = load <8 x i64>, <8 x i64>* %p
-  %y = load <8 x i64>, <8 x i64>* %q
+  %x = load <8 x i64>, ptr %p
+  %y = load <8 x i64>, ptr %q
   %a = icmp eq <8 x i32> %s, %t
   %b = select <8 x i1> %a, <8 x i64> %x, <8 x i64> %y
-  store <8 x i64> %b, <8 x i64>* %r
+  store <8 x i64> %b, ptr %r
   ret void
 }
 
-define dso_local void @vselect_split_v16i8_setcc(<16 x i8> %s, <16 x i8> %t, <16 x i32>* %p, <16 x i32>* %q, <16 x i32>* %r) "min-legal-vector-width"="256" {
+define dso_local void @vselect_split_v16i8_setcc(<16 x i8> %s, <16 x i8> %t, ptr %p, ptr %q, ptr %r) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: vselect_split_v16i8_setcc:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rsi), %ymm2
@@ -1335,15 +1335,15 @@ define dso_local void @vselect_split_v16i8_setcc(<16 x i8> %s, <16 x i8> %t, <16
 ; CHECK-NEXT:    vmovdqa %ymm3, 32(%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = load <16 x i32>, <16 x i32>* %p
-  %y = load <16 x i32>, <16 x i32>* %q
+  %x = load <16 x i32>, ptr %p
+  %y = load <16 x i32>, ptr %q
   %a = icmp eq <16 x i8> %s, %t
   %b = select <16 x i1> %a, <16 x i32> %x, <16 x i32> %y
-  store <16 x i32> %b, <16 x i32>* %r
+  store <16 x i32> %b, ptr %r
   ret void
 }
 
-define dso_local void @vselect_split_v16i16_setcc(<16 x i16> %s, <16 x i16> %t, <16 x i32>* %p, <16 x i32>* %q, <16 x i32>* %r) "min-legal-vector-width"="256" {
+define dso_local void @vselect_split_v16i16_setcc(<16 x i16> %s, <16 x i16> %t, ptr %p, ptr %q, ptr %r) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: vselect_split_v16i16_setcc:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rsi), %ymm2
@@ -1356,15 +1356,15 @@ define dso_local void @vselect_split_v16i16_setcc(<16 x i16> %s, <16 x i16> %t,
 ; CHECK-NEXT:    vmovdqa %ymm3, 32(%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = load <16 x i32>, <16 x i32>* %p
-  %y = load <16 x i32>, <16 x i32>* %q
+  %x = load <16 x i32>, ptr %p
+  %y = load <16 x i32>, ptr %q
   %a = icmp eq <16 x i16> %s, %t
   %b = select <16 x i1> %a, <16 x i32> %x, <16 x i32> %y
-  store <16 x i32> %b, <16 x i32>* %r
+  store <16 x i32> %b, ptr %r
   ret void
 }
 
-define <16 x i8> @trunc_packus_v16i32_v16i8(<16 x i32>* %p) "min-legal-vector-width"="256" {
+define <16 x i8> @trunc_packus_v16i32_v16i8(ptr %p) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: trunc_packus_v16i32_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
@@ -1373,7 +1373,7 @@ define <16 x i8> @trunc_packus_v16i32_v16i8(<16 x i32>* %p) "min-legal-vector-wi
 ; CHECK-NEXT:    vpmovuswb %ymm0, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %a = load <16 x i32>, <16 x i32>* %p
+  %a = load <16 x i32>, ptr %p
   %b = icmp slt <16 x i32> %a, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
   %c = select <16 x i1> %b, <16 x i32> %a, <16 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
   %d = icmp sgt <16 x i32> %c, zeroinitializer
@@ -1382,7 +1382,7 @@ define <16 x i8> @trunc_packus_v16i32_v16i8(<16 x i32>* %p) "min-legal-vector-wi
   ret <16 x i8> %f
 }
 
-define dso_local void @trunc_packus_v16i32_v16i8_store(<16 x i32>* %p, <16 x i8>* %q) "min-legal-vector-width"="256" {
+define dso_local void @trunc_packus_v16i32_v16i8_store(ptr %p, ptr %q) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: trunc_packus_v16i32_v16i8_store:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rdi), %ymm0
@@ -1391,13 +1391,13 @@ define dso_local void @trunc_packus_v16i32_v16i8_store(<16 x i32>* %p, <16 x i8>
 ; CHECK-NEXT:    vpmovuswb %ymm0, (%rsi)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %a = load <16 x i32>, <16 x i32>* %p
+  %a = load <16 x i32>, ptr %p
   %b = icmp slt <16 x i32> %a, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
   %c = select <16 x i1> %b, <16 x i32> %a, <16 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
   %d = icmp sgt <16 x i32> %c, zeroinitializer
   %e = select <16 x i1> %d, <16 x i32> %c, <16 x i32> zeroinitializer
   %f = trunc <16 x i32> %e to <16 x i8>
-  store <16 x i8> %f, <16 x i8>* %q
+  store <16 x i8> %f, ptr %q
   ret void
 }
 
@@ -1408,7 +1408,7 @@ define <64 x i1> @v64i1_argument_return(<64 x i1> %x) "min-legal-vector-width"="
   ret <64 x i1> %x
 }
 
-define dso_local void @v64i1_shuffle(<64 x i8>* %x, <64 x i8>* %y) "min-legal-vector-width"="256" {
+define dso_local void @v64i1_shuffle(ptr %x, ptr %y) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: v64i1_shuffle:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmovdqa (%rdi), %ymm1
@@ -1857,13 +1857,13 @@ define dso_local void @v64i1_shuffle(<64 x i8>* %x, <64 x i8>* %y) "min-legal-ve
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
 entry:
-  %a = load <64 x i8>, <64 x i8>* %x
+  %a = load <64 x i8>, ptr %x
   %b = icmp eq <64 x i8> %a, zeroinitializer
   %shuf = shufflevector <64 x i1> %b, <64 x i1> undef, <64 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14, i32 17, i32 16, i32 19, i32 18, i32 21, i32 20, i32 23, i32 22, i32 25, i32 24, i32 27, i32 26, i32 29, i32 28, i32 31, i32 30, i32 33, i32 32, i32 35, i32 34, i32 37, i32 36, i32 39, i32 38, i32 41, i32 40, i32 43, i32 42, i32 45, i32 44, i32 47, i32 46, i32 49, i32 48, i32 51, i32 50, i32 53, i32 52, i32 55, i32 54, i32 57, i32 56, i32 59, i32 58, i32 61, i32 60, i32 63, i32 62>
-  call void @llvm.masked.store.v64i8.p0v64i8(<64 x i8> %a, <64 x i8>* %y, i32 1, <64 x i1> %shuf)
+  call void @llvm.masked.store.v64i8.p0(<64 x i8> %a, ptr %y, i32 1, <64 x i1> %shuf)
   ret void
 }
-declare void @llvm.masked.store.v64i8.p0v64i8(<64 x i8>, <64 x i8>*, i32, <64 x i1>)
+declare void @llvm.masked.store.v64i8.p0(<64 x i8>, ptr, i32, <64 x i1>)
 
 @mem64_dst = dso_local global i64 0, align 8
 @mem64_src = dso_local global i64 0, align 8
@@ -1877,14 +1877,14 @@ define dso_local i32 @v64i1_inline_asm() "min-legal-vector-width"="256" {
 ; CHECK-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
 ; CHECK-NEXT:    retq
   %1 = alloca i32, align 4
-  %2 = load i64, i64* @mem64_src, align 8
+  %2 = load i64, ptr @mem64_src, align 8
   %3 = call i64 asm "", "=k,k,~{dirflag},~{fpsr},~{flags}"(i64 %2)
-  store i64 %3, i64* @mem64_dst, align 8
-  %4 = load i32, i32* %1, align 4
+  store i64 %3, ptr @mem64_dst, align 8
+  %4 = load i32, ptr %1, align 4
   ret i32 %4
 }
 
-define dso_local void @cmp_v8i64_sext(<8 x i64>* %xptr, <8 x i64>* %yptr, <8 x i64>* %zptr) "min-legal-vector-width"="256" {
+define dso_local void @cmp_v8i64_sext(ptr %xptr, ptr %yptr, ptr %zptr) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: cmp_v8i64_sext:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rsi), %ymm0
@@ -1895,15 +1895,15 @@ define dso_local void @cmp_v8i64_sext(<8 x i64>* %xptr, <8 x i64>* %yptr, <8 x i
 ; CHECK-NEXT:    vmovdqa %ymm1, 32(%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = load <8 x i64>, <8 x i64>* %xptr
-  %y = load <8 x i64>, <8 x i64>* %yptr
+  %x = load <8 x i64>, ptr %xptr
+  %y = load <8 x i64>, ptr %yptr
   %cmp = icmp slt <8 x i64> %x, %y
   %ext = sext <8 x i1> %cmp to <8 x i64>
-  store <8 x i64> %ext, <8 x i64>* %zptr
+  store <8 x i64> %ext, ptr %zptr
   ret void
 }
 
-define dso_local void @cmp_v8i64_zext(<8 x i64>* %xptr, <8 x i64>* %yptr, <8 x i64>* %zptr) "min-legal-vector-width"="256" {
+define dso_local void @cmp_v8i64_zext(ptr %xptr, ptr %yptr, ptr %zptr) "min-legal-vector-width"="256" {
 ; CHECK-LABEL: cmp_v8i64_zext:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovdqa (%rsi), %ymm0
@@ -1916,11 +1916,11 @@ define dso_local void @cmp_v8i64_zext(<8 x i64>* %xptr, <8 x i64>* %yptr, <8 x i
 ; CHECK-NEXT:    vmovdqa %ymm1, 32(%rdx)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
-  %x = load <8 x i64>, <8 x i64>* %xptr
-  %y = load <8 x i64>, <8 x i64>* %yptr
+  %x = load <8 x i64>, ptr %xptr
+  %y = load <8 x i64>, ptr %yptr
   %cmp = icmp slt <8 x i64> %x, %y
   %ext = zext <8 x i1> %cmp to <8 x i64>
-  store <8 x i64> %ext, <8 x i64>* %zptr
+  store <8 x i64> %ext, ptr %zptr
   ret void
 }
 

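The hunks above all apply one mechanical rewrite: loads and stores keep their explicit value type while the pointee type on the pointer operand collapses to plain ptr, and overloaded intrinsics such as @llvm.masked.load drop the pointee from their mangled suffix (.p0v4i32 becomes .p0). A minimal sketch of the converted form, with hypothetical names, that passes opt -passes=verify:

  define <4 x i32> @sketch_load_store(ptr %src, ptr %dst) {
    %v = load <4 x i32>, ptr %src, align 16   ; was: load <4 x i32>, <4 x i32>* %src
    store <4 x i32> %v, ptr %dst, align 16    ; was: store <4 x i32> %v, <4 x i32>* %dst
    ret <4 x i32> %v
  }
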
diff  --git a/llvm/test/CodeGen/X86/post-ra-sched-with-debug.mir b/llvm/test/CodeGen/X86/post-ra-sched-with-debug.mir
index 237177213590f..523940e2d6755 100644
--- a/llvm/test/CodeGen/X86/post-ra-sched-with-debug.mir
+++ b/llvm/test/CodeGen/X86/post-ra-sched-with-debug.mir
@@ -89,63 +89,63 @@
 
 --- |
 
-  %class.s = type <{ %class.l, [4 x i8], %class.p, %class.p*, i32, [4 x i8] }>
+  %class.s = type <{ %class.l, [4 x i8], %class.p, ptr, i32, [4 x i8] }>
   %class.l = type { i32 }
   %class.p = type { %class.b }
-  %class.b = type { %struct.a*, %struct.a* }
+  %class.b = type { ptr, ptr }
   %struct.a = type opaque
 
-  @n = local_unnamed_addr global i32* null, align 8
+  @n = local_unnamed_addr global ptr null, align 8
   @o = global i32 0, align 4
 
-  define linkonce_odr void @_ZN1sC2Ei(%class.s*, i32) unnamed_addr #0 align 2 !dbg !4 {
+  define linkonce_odr void @_ZN1sC2Ei(ptr, i32) unnamed_addr #0 align 2 !dbg !4 {
     %3 = alloca i32, align 4
-    %4 = bitcast %class.s* %0 to %class.l*
-    tail call void @_ZN1lC2Ei(%class.l* %4, i32 %1)
-    %5 = getelementptr inbounds %class.s, %class.s* %0, i64 0, i32 2
-    tail call void @llvm.dbg.value(metadata %class.p* %5, i64 0, metadata !10, metadata !17), !dbg !18
-    tail call void @llvm.dbg.value(metadata %class.p* %5, i64 0, metadata !20, metadata !17), !dbg !27
-    %6 = getelementptr inbounds %class.s, %class.s* %0, i64 0, i32 2, i32 0, i32 1
-    %7 = bitcast %struct.a** %6 to i64*
-    %8 = load i64, i64* %7, align 8
-    %9 = bitcast %class.p* %5 to i64*
-    store i64 %8, i64* %9, align 8
-    %10 = getelementptr inbounds %class.s, %class.s* %0, i64 0, i32 3
-    store %class.p* %5, %class.p** %10, align 8
-    %11 = getelementptr inbounds %class.s, %class.s* %0, i64 0, i32 4
+    %4 = bitcast ptr %0 to ptr
+    tail call void @_ZN1lC2Ei(ptr %4, i32 %1)
+    %5 = getelementptr inbounds %class.s, ptr %0, i64 0, i32 2
+    tail call void @llvm.dbg.value(metadata ptr %5, i64 0, metadata !10, metadata !17), !dbg !18
+    tail call void @llvm.dbg.value(metadata ptr %5, i64 0, metadata !20, metadata !17), !dbg !27
+    %6 = getelementptr inbounds %class.s, ptr %0, i64 0, i32 2, i32 0, i32 1
+    %7 = bitcast ptr %6 to ptr
+    %8 = load i64, ptr %7, align 8
+    %9 = bitcast ptr %5 to ptr
+    store i64 %8, ptr %9, align 8
+    %10 = getelementptr inbounds %class.s, ptr %0, i64 0, i32 3
+    store ptr %5, ptr %10, align 8
+    %11 = getelementptr inbounds %class.s, ptr %0, i64 0, i32 4
     %12 = shl i32 -1, %1
-    store i32 %12, i32* %11, align 8
-    store i32 0, i32* %3, align 4
-    %13 = bitcast %class.p* %5 to i32**
-    %14 = load i32*, i32** %13, align 8
-    store i32* %14, i32** @n, align 8
-    %15 = icmp eq i32* %14, null
-    %16 = ptrtoint i32* %14 to i64
-    %17 = select i1 %15, i64 ptrtoint (i32* @o to i64), i64 0
+    store i32 %12, ptr %11, align 8
+    store i32 0, ptr %3, align 4
+    %13 = bitcast ptr %5 to ptr
+    %14 = load ptr, ptr %13, align 8
+    store ptr %14, ptr @n, align 8
+    %15 = icmp eq ptr %14, null
+    %16 = ptrtoint ptr %14 to i64
+    %17 = select i1 %15, i64 ptrtoint (ptr @o to i64), i64 0
     %18 = or i64 %17, %16
-    tail call void @llvm.dbg.value(metadata i32* %3, i64 0, metadata !29, metadata !35), !dbg !36
-    tail call void @llvm.dbg.value(metadata i32* %3, i64 0, metadata !39, metadata !17), !dbg !44
-    %19 = ptrtoint i32* %3 to i64
+    tail call void @llvm.dbg.value(metadata ptr %3, i64 0, metadata !29, metadata !35), !dbg !36
+    tail call void @llvm.dbg.value(metadata ptr %3, i64 0, metadata !39, metadata !17), !dbg !44
+    %19 = ptrtoint ptr %3 to i64
     call void @llvm.dbg.value(metadata i64 %19, i64 0, metadata !46, metadata !17), !dbg !48
     %20 = icmp eq i64 %18, 0
     %21 = select i1 %20, i64 %19, i64 0
     %22 = or i64 %21, %18
-    %23 = inttoptr i64 %22 to i32*
-    %24 = bitcast %class.s* %0 to i32*
-    %25 = load i32, i32* %24, align 8
+    %23 = inttoptr i64 %22 to ptr
+    %24 = bitcast ptr %0 to ptr
+    %25 = load i32, ptr %24, align 8
     %26 = sext i32 %25 to i64
-    %27 = getelementptr inbounds i32, i32* %23, i64 %26
-    %28 = load i32, i32* %27, align 4
+    %27 = getelementptr inbounds i32, ptr %23, i64 %26
+    %28 = load i32, ptr %27, align 4
     %29 = and i32 %12, %28
     %30 = icmp eq i32 %29, 0
     br i1 %30, label %47, label %31
 
   ; <label>:31:                                     ; preds = %2
-    %32 = bitcast %class.s* %0 to i32*
-    %33 = call i32 @_ZN1p2aaEv(%class.p* %5)
+    %32 = bitcast ptr %0 to ptr
+    %33 = call i32 @_ZN1p2aaEv(ptr %5)
     %34 = add nsw i32 %33, -1
     %35 = sdiv i32 %34, 2
-    %36 = load i32, i32* %32, align 8
+    %36 = load i32, ptr %32, align 8
     %37 = icmp sgt i32 %36, %35
     br i1 %37, label %38, label %47
 
@@ -153,15 +153,15 @@
     br label %39
 
   ; <label>:39:                                     ; preds = %39, %38
-    %40 = bitcast %class.s* %0 to i32*
-    %sunkaddr = ptrtoint %class.s* %0 to i64
+    %40 = bitcast ptr %0 to ptr
+    %sunkaddr = ptrtoint ptr %0 to i64
     %sunkaddr1 = add i64 %sunkaddr, 24
-    %sunkaddr2 = inttoptr i64 %sunkaddr1 to %class.p**
-    %41 = load %class.p*, %class.p** %sunkaddr2, align 8
-    %42 = call i32 @_ZN1p2aaEv(%class.p* %41)
+    %sunkaddr2 = inttoptr i64 %sunkaddr1 to ptr
+    %41 = load ptr, ptr %sunkaddr2, align 8
+    %42 = call i32 @_ZN1p2aaEv(ptr %41)
     %43 = add nsw i32 %42, -1
     %44 = sdiv i32 %43, 2
-    %45 = load i32, i32* %40, align 8
+    %45 = load i32, ptr %40, align 8
     %46 = icmp sgt i32 %45, %44
     br i1 %46, label %39, label %47
 
@@ -169,9 +169,9 @@
     ret void
   }
 
-  declare void @_ZN1lC2Ei(%class.l*, i32) unnamed_addr #1
+  declare void @_ZN1lC2Ei(ptr, i32) unnamed_addr #1
 
-  declare i32 @_ZN1p2aaEv(%class.p*) local_unnamed_addr #1
+  declare i32 @_ZN1p2aaEv(ptr) local_unnamed_addr #1
 
   ; Function Attrs: nounwind readnone
   declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #2

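The MIR test above also shows the conversion staying instruction-for-instruction: pointer bitcasts degenerate into ptr-to-ptr no-ops instead of being deleted, and getelementptr keeps its source element type as an explicit first operand. A small self-contained sketch of both leftovers (hypothetical type and function names):

  %class.pair = type { i32, i32 }

  define ptr @sketch_gep(ptr %obj) {
    %cast = bitcast ptr %obj to ptr    ; no-op cast left behind by the conversion
    %field = getelementptr inbounds %class.pair, ptr %cast, i64 0, i32 1
    ret ptr %field
  }
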
diff  --git a/llvm/test/CodeGen/X86/pr44140.ll b/llvm/test/CodeGen/X86/pr44140.ll
index a218c9d4dcea8..9455895a99b87 100644
--- a/llvm/test/CodeGen/X86/pr44140.ll
+++ b/llvm/test/CodeGen/X86/pr44140.ll
@@ -64,34 +64,34 @@ start:
   br label %fake-loop
 
 fake-loop:                                        ; preds = %fake-loop, %start
-  %dummy0.cast = bitcast [22 x i64]* %dummy0 to i8*
-  %dummy1.cast = bitcast [22 x i64]* %dummy1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 8 %dummy1.cast, i8* nonnull align 8 %dummy0.cast, i64 176, i1 false)
+  %dummy0.cast = bitcast ptr %dummy0 to ptr
+  %dummy1.cast = bitcast ptr %dummy1 to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 %dummy1.cast, ptr nonnull align 8 %dummy0.cast, i64 176, i1 false)
 
-  %dummy1.cast.copy = bitcast [22 x i64]* %dummy1 to i8*
-  %dummy2.cast = bitcast [22 x i64]* %dummy2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 176, i8* nonnull %dummy2.cast)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 8 %dummy2.cast, i8* nonnull align 8 %dummy1.cast.copy, i64 176, i1 false)
+  %dummy1.cast.copy = bitcast ptr %dummy1 to ptr
+  %dummy2.cast = bitcast ptr %dummy2 to ptr
+  call void @llvm.lifetime.start.p0(i64 176, ptr nonnull %dummy2.cast)
+  call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 %dummy2.cast, ptr nonnull align 8 %dummy1.cast.copy, i64 176, i1 false)
 
   call win64cc void @opaque()
 
-  store <2 x i64> <i64 1010101010101010101, i64 2020202020202020202>, <2 x i64>* %data, align 8
+  store <2 x i64> <i64 1010101010101010101, i64 2020202020202020202>, ptr %data, align 8
 
   %opaque-false = icmp eq i8 0, 1
   br i1 %opaque-false, label %fake-loop, label %exit
 
 exit:                                             ; preds = %fake-loop
-  %data.cast = bitcast <2 x i64>* %data to i64*
-  %0 = load i64, i64* %data.cast, align 8
+  %data.cast = bitcast ptr %data to ptr
+  %0 = load i64, ptr %data.cast, align 8
   %1 = icmp eq i64 %0, 1010101010101010101
   %2 = select i1 %1, i32 0, i32 -1
   ret i32 %2
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg) #0
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg) #0
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
 
 attributes #0 = { argmemonly nounwind }

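The intrinsic renames in pr44140.ll follow from the same rule: with a single pointer type per address space, pointer operands contribute only their address space to the mangled name, so llvm.memcpy.p0i8.p0i8.i64 becomes llvm.memcpy.p0.p0.i64 and llvm.lifetime.start.p0i8 becomes llvm.lifetime.start.p0. A sketch of the converted form (hypothetical function name):

  declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg)
  declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)

  define void @sketch_copy(ptr %src) {
    %tmp = alloca [16 x i8], align 8
    call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %tmp)
    call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 16, i1 false)
    ret void
  }
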
diff  --git a/llvm/test/CodeGen/X86/pr48064.mir b/llvm/test/CodeGen/X86/pr48064.mir
index 1e1ea991e82c7..a6c927185881f 100644
--- a/llvm/test/CodeGen/X86/pr48064.mir
+++ b/llvm/test/CodeGen/X86/pr48064.mir
@@ -55,12 +55,12 @@
   source_filename = "test.cpp"
   target datalayout = "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:32-n8:16:32-a:0:32-S32"
 
-  %rtti.TypeDescriptor2 = type { i8**, i8*, [3 x i8] }
-  %eh.CatchableType = type { i32, i8*, i32, i32, i32, i32, i8* }
-  %eh.CatchableTypeArray.1 = type { i32, [1 x %eh.CatchableType*] }
-  %eh.ThrowInfo = type { i32, i8*, i8*, i8* }
-  %CXXExceptionRegistration = type { i8*, %EHRegistrationNode, i32 }
-  %EHRegistrationNode = type { %EHRegistrationNode*, i8* }
+  %rtti.TypeDescriptor2 = type { ptr, ptr, [3 x i8] }
+  %eh.CatchableType = type { i32, ptr, i32, i32, i32, i32, ptr }
+  %eh.CatchableTypeArray.1 = type { i32, [1 x ptr] }
+  %eh.ThrowInfo = type { i32, ptr, ptr, ptr }
+  %CXXExceptionRegistration = type { ptr, %EHRegistrationNode, i32 }
+  %EHRegistrationNode = type { ptr, ptr }
   %struct.object = type { i32 }
 
   $"_R0H@8" = comdat any
@@ -72,14 +72,14 @@
   $_TI1H = comdat any
 
   @v__3HC = dso_local global i32 0, align 4
-  @"_7type_info__6B@" = external constant i8*
-  @"_R0H@8" = linkonce_odr global %rtti.TypeDescriptor2 { i8** @"_7type_info__6B@", i8* null, [3 x i8] c".H\00" }, comdat
-  @"_CT_R0H@84" = linkonce_odr unnamed_addr constant %eh.CatchableType { i32 1, i8* bitcast (%rtti.TypeDescriptor2* @"_R0H@8" to i8*), i32 0, i32 -1, i32 0, i32 4, i8* null }, section ".xdata", comdat
-  @_CTA1H = linkonce_odr unnamed_addr constant %eh.CatchableTypeArray.1 { i32 1, [1 x %eh.CatchableType*] [%eh.CatchableType* @"_CT_R0H@84"] }, section ".xdata", comdat
-  @_TI1H = linkonce_odr unnamed_addr constant %eh.ThrowInfo { i32 0, i8* null, i8* null, i8* bitcast (%eh.CatchableTypeArray.1* @_CTA1H to i8*) }, section ".xdata", comdat
+  @"_7type_info__6B@" = external constant ptr
+  @"_R0H@8" = linkonce_odr global %rtti.TypeDescriptor2 { ptr @"_7type_info__6B@", ptr null, [3 x i8] c".H\00" }, comdat
+  @"_CT_R0H@84" = linkonce_odr unnamed_addr constant %eh.CatchableType { i32 1, ptr @"_R0H@8", i32 0, i32 -1, i32 0, i32 4, ptr null }, section ".xdata", comdat
+  @_CTA1H = linkonce_odr unnamed_addr constant %eh.CatchableTypeArray.1 { i32 1, [1 x ptr] [ptr @"_CT_R0H@84"] }, section ".xdata", comdat
+  @_TI1H = linkonce_odr unnamed_addr constant %eh.ThrowInfo { i32 0, ptr null, ptr null, ptr @_CTA1H }, section ".xdata", comdat
 
   ; Function Attrs: noinline nounwind sspstrong
-  define weak dso_local void @"escape__YAXPAH@Z"(i32* %p) local_unnamed_addr #0 {
+  define weak dso_local void @"escape__YAXPAH@Z"(ptr %p) local_unnamed_addr #0 {
   entry:
     ret void
   }
@@ -88,36 +88,36 @@
   define dso_local i32 @main() local_unnamed_addr #1 personality i32 (...)* @__CxxFrameHandler3 {
   entry:
     %0 = alloca %CXXExceptionRegistration, align 4
-    %1 = bitcast %CXXExceptionRegistration* %0 to i8*
-    call void @llvm.x86.seh.ehregnode(i8* %1)
-    %2 = call i8* @llvm.stacksave()
-    %3 = getelementptr inbounds %CXXExceptionRegistration, %CXXExceptionRegistration* %0, i32 0, i32 0
-    store i8* %2, i8** %3, align 4
-    %4 = getelementptr inbounds %CXXExceptionRegistration, %CXXExceptionRegistration* %0, i32 0, i32 2
-    store i32 -1, i32* %4, align 4
-    %5 = getelementptr inbounds %CXXExceptionRegistration, %CXXExceptionRegistration* %0, i32 0, i32 1
-    %6 = getelementptr inbounds %EHRegistrationNode, %EHRegistrationNode* %5, i32 0, i32 1
-    store i8* bitcast (i32 (i8*, i8*, i8*, i8*)* @"__ehhandler$main" to i8*), i8** %6, align 4
-    %7 = load %EHRegistrationNode*, %EHRegistrationNode* addrspace(257)* null, align 4
-    %8 = getelementptr inbounds %EHRegistrationNode, %EHRegistrationNode* %5, i32 0, i32 0
-    store %EHRegistrationNode* %7, %EHRegistrationNode** %8, align 4
-    store %EHRegistrationNode* %5, %EHRegistrationNode* addrspace(257)* null, align 4
+    %1 = bitcast ptr %0 to ptr
+    call void @llvm.x86.seh.ehregnode(ptr %1)
+    %2 = call ptr @llvm.stacksave()
+    %3 = getelementptr inbounds %CXXExceptionRegistration, ptr %0, i32 0, i32 0
+    store ptr %2, ptr %3, align 4
+    %4 = getelementptr inbounds %CXXExceptionRegistration, ptr %0, i32 0, i32 2
+    store i32 -1, ptr %4, align 4
+    %5 = getelementptr inbounds %CXXExceptionRegistration, ptr %0, i32 0, i32 1
+    %6 = getelementptr inbounds %EHRegistrationNode, ptr %5, i32 0, i32 1
+    store ptr @"__ehhandler$main", ptr %6, align 4
+    %7 = load ptr, ptr addrspace(257) null, align 4
+    %8 = getelementptr inbounds %EHRegistrationNode, ptr %5, i32 0, i32 0
+    store ptr %7, ptr %8, align 4
+    store ptr %5, ptr addrspace(257) null, align 4
     %tmp.i.i = alloca i32, align 4
     %o.i = alloca %struct.object, align 4
-    %zx = alloca i32*, align 4
-    %exp.i = alloca i32*, align 4
-    %9 = bitcast i32** %exp.i to i8*
-    call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %9)
-    %10 = bitcast %struct.object* %o.i to i8*
-    call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %10) #7
-    %i.i.i1 = bitcast %struct.object* %o.i to i32*
-    store i32 1, i32* %i.i.i1, align 4
-    %11 = bitcast i32* %tmp.i.i to i8*
-    call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %11)
-    store i32 999, i32* %tmp.i.i, align 4
-    %12 = getelementptr inbounds %CXXExceptionRegistration, %CXXExceptionRegistration* %0, i32 0, i32 2
-    store i32 1, i32* %12, align 4
-    invoke void @_CxxThrowException(i8* nonnull %11, %eh.ThrowInfo* nonnull @_TI1H) #8
+    %zx = alloca ptr, align 4
+    %exp.i = alloca ptr, align 4
+    %9 = bitcast ptr %exp.i to ptr
+    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %9)
+    %10 = bitcast ptr %o.i to ptr
+    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %10) #7
+    %i.i.i1 = bitcast ptr %o.i to ptr
+    store i32 1, ptr %i.i.i1, align 4
+    %11 = bitcast ptr %tmp.i.i to ptr
+    call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %11)
+    store i32 999, ptr %tmp.i.i, align 4
+    %12 = getelementptr inbounds %CXXExceptionRegistration, ptr %0, i32 0, i32 2
+    store i32 1, ptr %12, align 4
+    invoke void @_CxxThrowException(ptr nonnull %11, ptr nonnull @_TI1H) #8
             to label %.noexc.i unwind label %ehcleanup.i
 
   .noexc.i:                                         ; preds = %entry
@@ -125,64 +125,64 @@
 
   ehcleanup.i:                                      ; preds = %entry
     %13 = cleanuppad within none []
-    %14 = bitcast %struct.object* %o.i to i32*
-    %15 = bitcast %struct.object* %o.i to i8*
-    store i32 9999, i32* %14, align 4
-    call void @"escape__YAXPAH@Z"(i32* nonnull %14) #7 [ "funclet"(token %13) ]
-    call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %15) #7
+    %14 = bitcast ptr %o.i to ptr
+    %15 = bitcast ptr %o.i to ptr
+    store i32 9999, ptr %14, align 4
+    call void @"escape__YAXPAH@Z"(ptr nonnull %14) #7 [ "funclet"(token %13) ]
+    call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %15) #7
     cleanupret from %13 unwind label %catch.dispatch.i
 
   catch.dispatch.i:                                 ; preds = %ehcleanup.i
     %16 = catchswitch within none [label %catch.i] unwind to caller
 
   catch.i:                                          ; preds = %catch.dispatch.i
-    %17 = catchpad within %16 [%rtti.TypeDescriptor2* @"_R0H@8", i32 8, i32** %exp.i]
-    %18 = load i32*, i32** %exp.i, align 4
-    %19 = load i32, i32* %18, align 4
-    store atomic volatile i32 %19, i32* @v__3HC release, align 4
+    %17 = catchpad within %16 [ptr @"_R0H@8", i32 8, ptr %exp.i]
+    %18 = load ptr, ptr %exp.i, align 4
+    %19 = load i32, ptr %18, align 4
+    store atomic volatile i32 %19, ptr @v__3HC release, align 4
     catchret from %17 to label %func__YAXXZ.exit
 
   func__YAXXZ.exit:                                 ; preds = %catch.i
-    %20 = bitcast i32** %exp.i to i8*
-    call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %20)
-    %21 = getelementptr inbounds %CXXExceptionRegistration, %CXXExceptionRegistration* %0, i32 0, i32 1
-    %22 = getelementptr inbounds %EHRegistrationNode, %EHRegistrationNode* %21, i32 0, i32 0
-    %23 = load %EHRegistrationNode*, %EHRegistrationNode** %22, align 4
-    store %EHRegistrationNode* %23, %EHRegistrationNode* addrspace(257)* null, align 4
+    %20 = bitcast ptr %exp.i to ptr
+    call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %20)
+    %21 = getelementptr inbounds %CXXExceptionRegistration, ptr %0, i32 0, i32 1
+    %22 = getelementptr inbounds %EHRegistrationNode, ptr %21, i32 0, i32 0
+    %23 = load ptr, ptr %22, align 4
+    store ptr %23, ptr addrspace(257) null, align 4
     ret i32 0
   }
 
   ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-  declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #2
+  declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
 
   ; Function Attrs: nofree
   declare dso_local i32 @__CxxFrameHandler3(...) #3
 
   ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-  declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #2
+  declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
 
   ; Function Attrs: nofree
-  declare dso_local x86_stdcallcc void @_CxxThrowException(i8*, %eh.ThrowInfo*) local_unnamed_addr #3
+  declare dso_local x86_stdcallcc void @_CxxThrowException(ptr, ptr) local_unnamed_addr #3
 
-  declare i32 @_setjmp3(i8*, i32, ...)
+  declare i32 @_setjmp3(ptr, i32, ...)
 
   ; Function Attrs: nofree nosync nounwind willreturn
-  declare i8* @llvm.stacksave() #4
+  declare ptr @llvm.stacksave() #4
 
-  define internal i32 @"__ehhandler$main"(i8* %0, i8* %1, i8* %2, i8* %3) #5 {
+  define internal i32 @"__ehhandler$main"(ptr %0, ptr %1, ptr %2, ptr %3) #5 {
   entry:
-    %4 = call i8* @llvm.x86.seh.lsda(i8* bitcast (i32 ()* @main to i8*))
-    %5 = tail call i32 bitcast (i32 (...)* @__CxxFrameHandler3 to i32 (i8*, i8*, i8*, i8*, i8*)*)(i8* inreg %4, i8* %0, i8* %1, i8* %2, i8* %3)
+    %4 = call ptr @llvm.x86.seh.lsda(ptr @main)
+    %5 = tail call i32 @__CxxFrameHandler3(ptr inreg %4, ptr %0, ptr %1, ptr %2, ptr %3)
     ret i32 %5
   }
 
   ; Function Attrs: nounwind readnone
-  declare i8* @llvm.x86.seh.lsda(i8*) #6
+  declare ptr @llvm.x86.seh.lsda(ptr) #6
 
-  declare x86_stdcallcc void @__CxxLongjmpUnwind(i8*)
+  declare x86_stdcallcc void @__CxxLongjmpUnwind(ptr)
 
   ; Function Attrs: nounwind
-  declare void @llvm.x86.seh.ehregnode(i8*) #7
+  declare void @llvm.x86.seh.ehregnode(ptr) #7
 
   attributes #0 = { noinline nounwind sspstrong "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="pentium4" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { norecurse sspstrong "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="pentium4" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" }
@@ -320,9 +320,9 @@ body:             |
     MOV32mi %stack.0.zx, 1, $noreg, 12, $noreg, -1 :: (store (s32) into %ir.4)
     %1:gr32 = nuw LEA32r %stack.0.zx, 1, $noreg, 4, $noreg
     MOV32mi %stack.0.zx, 1, $noreg, 8, $noreg, @"__ehhandler$main" :: (store (s32) into %ir.6)
-    %2:gr32 = MOV32rm $noreg, 1, $noreg, 0, $fs :: (load (s32) from `%EHRegistrationNode* addrspace(257)* null`, addrspace 257)
+    %2:gr32 = MOV32rm $noreg, 1, $noreg, 0, $fs :: (load (s32) from `ptr addrspace(257) null`, addrspace 257)
     MOV32mr %stack.0.zx, 1, $noreg, 4, $noreg, killed %2 :: (store (s32) into %ir.8)
-    MOV32mr $noreg, 1, $noreg, 0, $fs, killed %1 :: (store (s32) into `%EHRegistrationNode* addrspace(257)* null`, addrspace 257)
+    MOV32mr $noreg, 1, $noreg, 0, $fs, killed %1 :: (store (s32) into `ptr addrspace(257) null`, addrspace 257)
     MOV32mi %stack.2.o.i, 1, $noreg, 0, $noreg, 1 :: (store (s32) into %ir.i.i.i1)
     MOV32mi %stack.1.tmp.i.i, 1, $noreg, 0, $noreg, 999 :: (store (s32) into %ir.tmp.i.i)
     MOV32mi %stack.0.zx, 1, $noreg, 12, $noreg, 1 :: (store (s32) into %ir.12)
@@ -366,7 +366,7 @@ body:             |
 
   bb.5.func__YAXXZ.exit:
     %9:gr32 = MOV32rm %stack.0.zx, 1, $noreg, 4, $noreg :: (dereferenceable load (s32) from %ir.22)
-    MOV32mr $noreg, 1, $noreg, 0, $fs, killed %9 :: (store (s32) into `%EHRegistrationNode* addrspace(257)* null`, addrspace 257)
+    MOV32mr $noreg, 1, $noreg, 0, $fs, killed %9 :: (store (s32) into `ptr addrspace(257) null`, addrspace 257)
     %10:gr32 = MOV32r0 implicit-def dead $eflags
     $eax = COPY %10
     RET 0, $eax

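The initializer shrinkage in pr48064.mir comes from constant folding: a bitcast between two pointer types is the identity once both print as ptr, so expressions like i8* bitcast (%rtti.TypeDescriptor2* @"_R0H@8" to i8*) reduce to plain ptr @"_R0H@8". The same fold in miniature, with hypothetical globals:

  @payload = global i32 7
  ; was: @table = constant { i32, i8* } { i32 1, i8* bitcast (i32* @payload to i8*) }
  @table = constant { i32, ptr } { i32 1, ptr @payload }
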
diff  --git a/llvm/test/CodeGen/X86/pre-coalesce-2.ll b/llvm/test/CodeGen/X86/pre-coalesce-2.ll
index 55e9fb6ee184b..1b1b7c7fdc7d8 100644
--- a/llvm/test/CodeGen/X86/pre-coalesce-2.ll
+++ b/llvm/test/CodeGen/X86/pre-coalesce-2.ll
@@ -75,7 +75,7 @@ while.body:                                       ; preds = %56, %while.body.pre
   %e.058 = phi ptr [ %incdec.ptr60, %56 ], [ %e.0.ph, %while.body.preheader ]
   %incdec.ptr60 = getelementptr inbounds i8, ptr %p1.addr.0.ph.pn, i64 1
   %conv = sext i8 %21 to i32
-  %call = tail call i32 (ptr, i32, ...) @fn3(ptr getelementptr inbounds ({ [17 x i8], [47 x i8] }, ptr @.str, i32 0, i32 0, i64 0), i32 %conv) #2
+  %call = tail call i32 (ptr, i32, ...) @fn3(ptr @.str, i32 %conv) #2
   call void @__sanitizer_cov_trace_cmp4(i32 %call, i32 0)
   %tobool = icmp eq i32 %call, 0
   br i1 %tobool, label %if.end5, label %cleanup

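The pre-coalesce-2.ll change is the getelementptr analogue: a GEP constant expression whose indices are all zero designates the same address as its base, so under opaque pointers it folds to the bare global. In miniature (hypothetical string constant and callee):

  @.msg = private unnamed_addr constant [4 x i8] c"abc\00"

  declare i32 @puts(ptr)

  define i32 @sketch_fold() {
    ; was: call i32 @puts(ptr getelementptr inbounds ([4 x i8], [4 x i8]* @.msg, i64 0, i64 0))
    %r = call i32 @puts(ptr @.msg)
    ret i32 %r
  }
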
diff  --git a/llvm/test/CodeGen/X86/sad.ll b/llvm/test/CodeGen/X86/sad.ll
index d043234705aa0..2a33e75a8357c 100644
--- a/llvm/test/CodeGen/X86/sad.ll
+++ b/llvm/test/CodeGen/X86/sad.ll
@@ -117,13 +117,13 @@ entry:
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %vec.phi = phi <16 x i32> [ zeroinitializer, %entry ], [ %10, %vector.body ]
-  %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 4
+  %0 = getelementptr inbounds [1024 x i8], ptr @a, i64 0, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <16 x i8>, ptr %1, align 4
   %2 = zext <16 x i8> %wide.load to <16 x i32>
-  %3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load1 = load <16 x i8>, <16 x i8>* %4, align 4
+  %3 = getelementptr inbounds [1024 x i8], ptr @b, i64 0, i64 %index
+  %4 = bitcast ptr %3 to ptr
+  %wide.load1 = load <16 x i8>, ptr %4, align 4
   %5 = zext <16 x i8> %wide.load1 to <16 x i32>
   %6 = sub nsw <16 x i32> %2, %5
   %7 = icmp sgt <16 x i32> %6, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -274,13 +274,13 @@ entry:
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %vec.phi = phi <32 x i32> [ zeroinitializer, %entry ], [ %10, %vector.body ]
-  %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %index
-  %1 = bitcast i8* %0 to <32 x i8>*
-  %wide.load = load <32 x i8>, <32 x i8>* %1, align 32
+  %0 = getelementptr inbounds [1024 x i8], ptr @a, i64 0, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <32 x i8>, ptr %1, align 32
   %2 = zext <32 x i8> %wide.load to <32 x i32>
-  %3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %index
-  %4 = bitcast i8* %3 to <32 x i8>*
-  %wide.load1 = load <32 x i8>, <32 x i8>* %4, align 32
+  %3 = getelementptr inbounds [1024 x i8], ptr @b, i64 0, i64 %index
+  %4 = bitcast ptr %3 to ptr
+  %wide.load1 = load <32 x i8>, ptr %4, align 32
   %5 = zext <32 x i8> %wide.load1 to <32 x i32>
   %6 = sub nsw <32 x i32> %2, %5
   %7 = icmp sgt <32 x i32> %6, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -505,13 +505,13 @@ entry:
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %vec.phi = phi <64 x i32> [ zeroinitializer, %entry ], [ %10, %vector.body ]
-  %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %index
-  %1 = bitcast i8* %0 to <64 x i8>*
-  %wide.load = load <64 x i8>, <64 x i8>* %1, align 64
+  %0 = getelementptr inbounds [1024 x i8], ptr @a, i64 0, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <64 x i8>, ptr %1, align 64
   %2 = zext <64 x i8> %wide.load to <64 x i32>
-  %3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %index
-  %4 = bitcast i8* %3 to <64 x i8>*
-  %wide.load1 = load <64 x i8>, <64 x i8>* %4, align 64
+  %3 = getelementptr inbounds [1024 x i8], ptr @b, i64 0, i64 %index
+  %4 = bitcast ptr %3 to ptr
+  %wide.load1 = load <64 x i8>, ptr %4, align 64
   %5 = zext <64 x i8> %wide.load1 to <64 x i32>
   %6 = sub nsw <64 x i32> %2, %5
   %7 = icmp sgt <64 x i32> %6, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -589,13 +589,13 @@ entry:
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %vec.phi = phi <2 x i32> [ zeroinitializer, %entry ], [ %10, %vector.body ]
-  %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %index
-  %1 = bitcast i8* %0 to <2 x i8>*
-  %wide.load = load <2 x i8>, <2 x i8>* %1, align 4
+  %0 = getelementptr inbounds [1024 x i8], ptr @a, i64 0, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <2 x i8>, ptr %1, align 4
   %2 = zext <2 x i8> %wide.load to <2 x i32>
-  %3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %index
-  %4 = bitcast i8* %3 to <2 x i8>*
-  %wide.load1 = load <2 x i8>, <2 x i8>* %4, align 4
+  %3 = getelementptr inbounds [1024 x i8], ptr @b, i64 0, i64 %index
+  %4 = bitcast ptr %3 to ptr
+  %wide.load1 = load <2 x i8>, ptr %4, align 4
   %5 = zext <2 x i8> %wide.load1 to <2 x i32>
   %6 = sub nsw <2 x i32> %2, %5
   %7 = icmp sgt <2 x i32> %6, <i32 -1, i32 -1>
@@ -661,13 +661,13 @@ entry:
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %10, %vector.body ]
-  %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %index
-  %1 = bitcast i8* %0 to <4 x i8>*
-  %wide.load = load <4 x i8>, <4 x i8>* %1, align 4
+  %0 = getelementptr inbounds [1024 x i8], ptr @a, i64 0, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <4 x i8>, ptr %1, align 4
   %2 = zext <4 x i8> %wide.load to <4 x i32>
-  %3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %index
-  %4 = bitcast i8* %3 to <4 x i8>*
-  %wide.load1 = load <4 x i8>, <4 x i8>* %4, align 4
+  %3 = getelementptr inbounds [1024 x i8], ptr @b, i64 0, i64 %index
+  %4 = bitcast ptr %3 to ptr
+  %wide.load1 = load <4 x i8>, ptr %4, align 4
   %5 = zext <4 x i8> %wide.load1 to <4 x i32>
   %6 = sub nsw <4 x i32> %2, %5
   %7 = icmp sgt <4 x i32> %6, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -688,7 +688,7 @@ middle.block:
 }
 
 
-define dso_local i32 @sad_nonloop_4i8(<4 x i8>* nocapture readonly %p, i64, <4 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
+define dso_local i32 @sad_nonloop_4i8(ptr nocapture readonly %p, i64, ptr nocapture readonly %q) local_unnamed_addr #0 {
 ; SSE2-LABEL: sad_nonloop_4i8:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -704,9 +704,9 @@ define dso_local i32 @sad_nonloop_4i8(<4 x i8>* nocapture readonly %p, i64, <4 x
 ; AVX-NEXT:    vpsadbw %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    retq
-  %v1 = load <4 x i8>, <4 x i8>* %p, align 1
+  %v1 = load <4 x i8>, ptr %p, align 1
   %z1 = zext <4 x i8> %v1 to <4 x i32>
-  %v2 = load <4 x i8>, <4 x i8>* %q, align 1
+  %v2 = load <4 x i8>, ptr %q, align 1
   %z2 = zext <4 x i8> %v2 to <4 x i32>
   %sub = sub nsw <4 x i32> %z1, %z2
   %isneg = icmp sgt <4 x i32> %sub, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -720,7 +720,7 @@ define dso_local i32 @sad_nonloop_4i8(<4 x i8>* nocapture readonly %p, i64, <4 x
   ret i32 %sum
 }
 
-define dso_local i32 @sad_nonloop_8i8(<8 x i8>* nocapture readonly %p, i64, <8 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
+define dso_local i32 @sad_nonloop_8i8(ptr nocapture readonly %p, i64, ptr nocapture readonly %q) local_unnamed_addr #0 {
 ; SSE2-LABEL: sad_nonloop_8i8:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
@@ -736,9 +736,9 @@ define dso_local i32 @sad_nonloop_8i8(<8 x i8>* nocapture readonly %p, i64, <8 x
 ; AVX-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    retq
-  %v1 = load <8 x i8>, <8 x i8>* %p, align 1
+  %v1 = load <8 x i8>, ptr %p, align 1
   %z1 = zext <8 x i8> %v1 to <8 x i32>
-  %v2 = load <8 x i8>, <8 x i8>* %q, align 1
+  %v2 = load <8 x i8>, ptr %q, align 1
   %z2 = zext <8 x i8> %v2 to <8 x i32>
   %sub = sub nsw <8 x i32> %z1, %z2
   %isneg = icmp sgt <8 x i32> %sub, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -754,7 +754,7 @@ define dso_local i32 @sad_nonloop_8i8(<8 x i8>* nocapture readonly %p, i64, <8 x
   ret i32 %sum
 }
 
-define dso_local i32 @sad_nonloop_16i8(<16 x i8>* nocapture readonly %p, i64, <16 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
+define dso_local i32 @sad_nonloop_16i8(ptr nocapture readonly %p, i64, ptr nocapture readonly %q) local_unnamed_addr #0 {
 ; SSE2-LABEL: sad_nonloop_16i8:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqu (%rdi), %xmm0
@@ -773,9 +773,9 @@ define dso_local i32 @sad_nonloop_16i8(<16 x i8>* nocapture readonly %p, i64, <1
 ; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    retq
-  %v1 = load <16 x i8>, <16 x i8>* %p, align 1
+  %v1 = load <16 x i8>, ptr %p, align 1
   %z1 = zext <16 x i8> %v1 to <16 x i32>
-  %v2 = load <16 x i8>, <16 x i8>* %q, align 1
+  %v2 = load <16 x i8>, ptr %q, align 1
   %z2 = zext <16 x i8> %v2 to <16 x i32>
   %sub = sub nsw <16 x i32> %z1, %z2
   %isneg = icmp sgt <16 x i32> %sub, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -793,7 +793,7 @@ define dso_local i32 @sad_nonloop_16i8(<16 x i8>* nocapture readonly %p, i64, <1
   ret i32 %sum
 }
 
-define dso_local i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
+define dso_local i32 @sad_nonloop_32i8(ptr nocapture readonly %p, i64, ptr nocapture readonly %q) local_unnamed_addr #0 {
 ; SSE2-LABEL: sad_nonloop_32i8:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqu (%rdx), %xmm0
@@ -843,9 +843,9 @@ define dso_local i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <3
 ; AVX512-NEXT:    vmovd %xmm0, %eax
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
-  %v1 = load <32 x i8>, <32 x i8>* %p, align 1
+  %v1 = load <32 x i8>, ptr %p, align 1
   %z1 = zext <32 x i8> %v1 to <32 x i32>
-  %v2 = load <32 x i8>, <32 x i8>* %q, align 1
+  %v2 = load <32 x i8>, ptr %q, align 1
   %z2 = zext <32 x i8> %v2 to <32 x i32>
   %sub = sub nsw <32 x i32> %z1, %z2
   %isneg = icmp sgt <32 x i32> %sub, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -865,7 +865,7 @@ define dso_local i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <3
   ret i32 %sum
 }
 
-define dso_local i32 @sad_nonloop_64i8(<64 x i8>* nocapture readonly %p, i64, <64 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
+define dso_local i32 @sad_nonloop_64i8(ptr nocapture readonly %p, i64, ptr nocapture readonly %q) local_unnamed_addr #0 {
 ; SSE2-LABEL: sad_nonloop_64i8:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqu (%rdx), %xmm0
@@ -950,9 +950,9 @@ define dso_local i32 @sad_nonloop_64i8(<64 x i8>* nocapture readonly %p, i64, <6
 ; AVX512BW-NEXT:    vmovd %xmm0, %eax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
-  %v1 = load <64 x i8>, <64 x i8>* %p, align 1
+  %v1 = load <64 x i8>, ptr %p, align 1
   %z1 = zext <64 x i8> %v1 to <64 x i32>
-  %v2 = load <64 x i8>, <64 x i8>* %q, align 1
+  %v2 = load <64 x i8>, ptr %q, align 1
   %z2 = zext <64 x i8> %v2 to <64 x i32>
   %sub = sub nsw <64 x i32> %z1, %z2
   %isneg = icmp sgt <64 x i32> %sub, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -976,7 +976,7 @@ define dso_local i32 @sad_nonloop_64i8(<64 x i8>* nocapture readonly %p, i64, <6
 
 ; This contains an unrolled sad loop with a non-zero initial value.
 ; DAGCombiner reassociation previously rewrote the adds to move the constant vector further down the tree. This resulted in the vector-reduction flag being lost.
-define dso_local i32 @sad_unroll_nonzero_initial(<16 x i8>* %arg, <16 x i8>* %arg1, <16 x i8>* %arg2, <16 x i8>* %arg3) {
+define dso_local i32 @sad_unroll_nonzero_initial(ptr %arg, ptr %arg1, ptr %arg2, ptr %arg3) {
 ; SSE2-LABEL: sad_unroll_nonzero_initial:
 ; SSE2:       # %bb.0: # %bb
 ; SSE2-NEXT:    movdqu (%rdi), %xmm0
@@ -1009,8 +1009,8 @@ define dso_local i32 @sad_unroll_nonzero_initial(<16 x i8>* %arg, <16 x i8>* %ar
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    retq
 bb:
-  %tmp = load <16 x i8>, <16 x i8>* %arg, align 1
-  %tmp4 = load <16 x i8>, <16 x i8>* %arg1, align 1
+  %tmp = load <16 x i8>, ptr %arg, align 1
+  %tmp4 = load <16 x i8>, ptr %arg1, align 1
   %tmp5 = zext <16 x i8> %tmp to <16 x i32>
   %tmp6 = zext <16 x i8> %tmp4 to <16 x i32>
   %tmp7 = sub nsw <16 x i32> %tmp5, %tmp6
@@ -1018,8 +1018,8 @@ bb:
   %tmp9 = sub nsw <16 x i32> zeroinitializer, %tmp7
   %tmp10 = select <16 x i1> %tmp8, <16 x i32> %tmp9, <16 x i32> %tmp7
   %tmp11 = add nuw nsw <16 x i32> %tmp10, <i32 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
-  %tmp12 = load <16 x i8>, <16 x i8>* %arg2, align 1
-  %tmp13 = load <16 x i8>, <16 x i8>* %arg3, align 1
+  %tmp12 = load <16 x i8>, ptr %arg2, align 1
+  %tmp13 = load <16 x i8>, ptr %arg3, align 1
   %tmp14 = zext <16 x i8> %tmp12 to <16 x i32>
   %tmp15 = zext <16 x i8> %tmp13 to <16 x i32>
   %tmp16 = sub nsw <16 x i32> %tmp14, %tmp15
@@ -1041,7 +1041,7 @@ bb:
 
 ; This test contains two absolute difference patterns joined by an add. The result of that add is then reduced to a single element.
 ; SelectionDAGBuilder should tag the joining add as a vector reduction. We need to recognize that both sides can use psadbw.
-define dso_local i32 @sad_double_reduction(<16 x i8>* %arg, <16 x i8>* %arg1, <16 x i8>* %arg2, <16 x i8>* %arg3) {
+define dso_local i32 @sad_double_reduction(ptr %arg, ptr %arg1, ptr %arg2, ptr %arg3) {
 ; SSE2-LABEL: sad_double_reduction:
 ; SSE2:       # %bb.0: # %bb
 ; SSE2-NEXT:    movdqu (%rdi), %xmm0
@@ -1072,16 +1072,16 @@ define dso_local i32 @sad_double_reduction(<16 x i8>* %arg, <16 x i8>* %arg1, <1
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    retq
 bb:
-  %tmp = load <16 x i8>, <16 x i8>* %arg, align 1
-  %tmp4 = load <16 x i8>, <16 x i8>* %arg1, align 1
+  %tmp = load <16 x i8>, ptr %arg, align 1
+  %tmp4 = load <16 x i8>, ptr %arg1, align 1
   %tmp5 = zext <16 x i8> %tmp to <16 x i32>
   %tmp6 = zext <16 x i8> %tmp4 to <16 x i32>
   %tmp7 = sub nsw <16 x i32> %tmp5, %tmp6
   %tmp8 = icmp slt <16 x i32> %tmp7, zeroinitializer
   %tmp9 = sub nsw <16 x i32> zeroinitializer, %tmp7
   %tmp10 = select <16 x i1> %tmp8, <16 x i32> %tmp9, <16 x i32> %tmp7
-  %tmp11 = load <16 x i8>, <16 x i8>* %arg2, align 1
-  %tmp12 = load <16 x i8>, <16 x i8>* %arg3, align 1
+  %tmp11 = load <16 x i8>, ptr %arg2, align 1
+  %tmp12 = load <16 x i8>, ptr %arg3, align 1
   %tmp13 = zext <16 x i8> %tmp11 to <16 x i32>
   %tmp14 = zext <16 x i8> %tmp12 to <16 x i32>
   %tmp15 = sub nsw <16 x i32> %tmp13, %tmp14
@@ -1103,7 +1103,7 @@ bb:
 
 ; This test contains two absolute difference patterns joined by an add. The result of that add is then reduced to a single element.
 ; SelectionDAGBuilder should tag the joining add as a vector reduction. We need to recognize that both sides can use psadbw.
-define dso_local i32 @sad_double_reduction_abs(<16 x i8>* %arg, <16 x i8>* %arg1, <16 x i8>* %arg2, <16 x i8>* %arg3) {
+define dso_local i32 @sad_double_reduction_abs(ptr %arg, ptr %arg1, ptr %arg2, ptr %arg3) {
 ; SSE2-LABEL: sad_double_reduction_abs:
 ; SSE2:       # %bb.0: # %bb
 ; SSE2-NEXT:    movdqu (%rdi), %xmm0
@@ -1134,14 +1134,14 @@ define dso_local i32 @sad_double_reduction_abs(<16 x i8>* %arg, <16 x i8>* %arg1
 ; AVX-NEXT:    vmovd %xmm0, %eax
 ; AVX-NEXT:    retq
 bb:
-  %tmp = load <16 x i8>, <16 x i8>* %arg, align 1
-  %tmp4 = load <16 x i8>, <16 x i8>* %arg1, align 1
+  %tmp = load <16 x i8>, ptr %arg, align 1
+  %tmp4 = load <16 x i8>, ptr %arg1, align 1
   %tmp5 = zext <16 x i8> %tmp to <16 x i32>
   %tmp6 = zext <16 x i8> %tmp4 to <16 x i32>
   %tmp7 = sub nsw <16 x i32> %tmp5, %tmp6
   %tmp10 = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %tmp7, i1 false)
-  %tmp11 = load <16 x i8>, <16 x i8>* %arg2, align 1
-  %tmp12 = load <16 x i8>, <16 x i8>* %arg3, align 1
+  %tmp11 = load <16 x i8>, ptr %arg2, align 1
+  %tmp12 = load <16 x i8>, ptr %arg3, align 1
   %tmp13 = zext <16 x i8> %tmp11 to <16 x i32>
   %tmp14 = zext <16 x i8> %tmp12 to <16 x i32>
   %tmp15 = sub nsw <16 x i32> %tmp13, %tmp14

diff  --git a/llvm/test/CodeGen/X86/select-neg.ll b/llvm/test/CodeGen/X86/select-neg.ll
index cbefbe5cac4b8..80804a3b1c60a 100644
--- a/llvm/test/CodeGen/X86/select-neg.ll
+++ b/llvm/test/CodeGen/X86/select-neg.ll
@@ -11,7 +11,7 @@ define i32 @function(i32 %arg1) {
 ; CHECK-NEXT:    negl %eax
 ; CHECK-NEXT:    retl
 entry:
-  %0 = xor i32 %arg1, xor (i32 ptrtoint (i32* @value1 to i32), i32 -1)
+  %0 = xor i32 %arg1, xor (i32 ptrtoint (ptr @value1 to i32), i32 -1)
   %.neg = add i32 %0, 1
   ret i32 %.neg
 }

diff  --git a/llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
index db2f78bf5eedc..f2e48c7f308e5 100644
--- a/llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
@@ -970,13 +970,13 @@ define i32 @test_MM_GET_EXCEPTION_MASK() nounwind {
 ; X64-AVX-NEXT:    # imm = 0x1F80
 ; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %1 = alloca i32, align 4
-  %2 = bitcast i32* %1 to i8*
-  call void @llvm.x86.sse.stmxcsr(i8* %2)
-  %3 = load i32, i32* %1, align 4
+  %2 = bitcast ptr %1 to ptr
+  call void @llvm.x86.sse.stmxcsr(ptr %2)
+  %3 = load i32, ptr %1, align 4
   %4 = and i32 %3, 8064
   ret i32 %4
 }
-declare void @llvm.x86.sse.stmxcsr(i8*) nounwind readnone
+declare void @llvm.x86.sse.stmxcsr(ptr) nounwind readnone
 
 define i32 @test_MM_GET_EXCEPTION_STATE() nounwind {
 ; X86-SSE-LABEL: test_MM_GET_EXCEPTION_STATE:
@@ -1015,9 +1015,9 @@ define i32 @test_MM_GET_EXCEPTION_STATE() nounwind {
 ; X64-AVX-NEXT:    andl $63, %eax # encoding: [0x83,0xe0,0x3f]
 ; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %1 = alloca i32, align 4
-  %2 = bitcast i32* %1 to i8*
-  call void @llvm.x86.sse.stmxcsr(i8* %2)
-  %3 = load i32, i32* %1, align 4
+  %2 = bitcast ptr %1 to ptr
+  call void @llvm.x86.sse.stmxcsr(ptr %2)
+  %3 = load i32, ptr %1, align 4
   %4 = and i32 %3, 63
   ret i32 %4
 }
@@ -1063,9 +1063,9 @@ define i32 @test_MM_GET_FLUSH_ZERO_MODE() nounwind {
 ; X64-AVX-NEXT:    # imm = 0x8000
 ; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %1 = alloca i32, align 4
-  %2 = bitcast i32* %1 to i8*
-  call void @llvm.x86.sse.stmxcsr(i8* %2)
-  %3 = load i32, i32* %1, align 4
+  %2 = bitcast ptr %1 to ptr
+  call void @llvm.x86.sse.stmxcsr(ptr %2)
+  %3 = load i32, ptr %1, align 4
   %4 = and i32 %3, 32768
   ret i32 %4
 }
@@ -1111,9 +1111,9 @@ define i32 @test_MM_GET_ROUNDING_MODE() nounwind {
 ; X64-AVX-NEXT:    # imm = 0x6000
 ; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %1 = alloca i32, align 4
-  %2 = bitcast i32* %1 to i8*
-  call void @llvm.x86.sse.stmxcsr(i8* %2)
-  %3 = load i32, i32* %1, align 4
+  %2 = bitcast ptr %1 to ptr
+  call void @llvm.x86.sse.stmxcsr(ptr %2)
+  %3 = load i32, ptr %1, align 4
   %4 = and i32 %3, 24576
   ret i32 %4
 }
@@ -1151,13 +1151,13 @@ define i32 @test_mm_getcsr() nounwind {
 ; X64-AVX-NEXT:    movl -{{[0-9]+}}(%rsp), %eax # encoding: [0x8b,0x44,0x24,0xfc]
 ; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %1 = alloca i32, align 4
-  %2 = bitcast i32* %1 to i8*
-  call void @llvm.x86.sse.stmxcsr(i8* %2)
-  %3 = load i32, i32* %1, align 4
+  %2 = bitcast ptr %1 to ptr
+  call void @llvm.x86.sse.stmxcsr(ptr %2)
+  %3 = load i32, ptr %1, align 4
   ret i32 %3
 }
 
-define <4 x float> @test_mm_load_ps(float* %a0) nounwind {
+define <4 x float> @test_mm_load_ps(ptr %a0) nounwind {
 ; X86-SSE-LABEL: test_mm_load_ps:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -1190,12 +1190,12 @@ define <4 x float> @test_mm_load_ps(float* %a0) nounwind {
 ; X64-AVX512:       # %bb.0:
 ; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %arg0 = bitcast float* %a0 to <4 x float>*
-  %res = load <4 x float>, <4 x float>* %arg0, align 16
+  %arg0 = bitcast ptr %a0 to ptr
+  %res = load <4 x float>, ptr %arg0, align 16
   ret <4 x float> %res
 }
 
-define <4 x float> @test_mm_load_ps1(float* %a0) nounwind {
+define <4 x float> @test_mm_load_ps1(ptr %a0) nounwind {
 ; X86-SSE-LABEL: test_mm_load_ps1:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -1234,7 +1234,7 @@ define <4 x float> @test_mm_load_ps1(float* %a0) nounwind {
 ; X64-AVX512:       # %bb.0:
 ; X64-AVX512-NEXT:    vbroadcastss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %ld = load float, float* %a0, align 4
+  %ld = load float, ptr %a0, align 4
   %res0 = insertelement <4 x float> undef, float %ld, i32 0
   %res1 = insertelement <4 x float> %res0, float %ld, i32 1
   %res2 = insertelement <4 x float> %res1, float %ld, i32 2
@@ -1242,7 +1242,7 @@ define <4 x float> @test_mm_load_ps1(float* %a0) nounwind {
   ret <4 x float> %res3
 }
 
-define <4 x float> @test_mm_load_ss(float* %a0) nounwind {
+define <4 x float> @test_mm_load_ss(ptr %a0) nounwind {
 ; X86-SSE-LABEL: test_mm_load_ss:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -1281,7 +1281,7 @@ define <4 x float> @test_mm_load_ss(float* %a0) nounwind {
 ; X64-AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %ld = load float, float* %a0, align 1
+  %ld = load float, ptr %a0, align 1
   %res0 = insertelement <4 x float> undef, float %ld, i32 0
   %res1 = insertelement <4 x float> %res0, float 0.0, i32 1
   %res2 = insertelement <4 x float> %res1, float 0.0, i32 2
@@ -1289,7 +1289,7 @@ define <4 x float> @test_mm_load_ss(float* %a0) nounwind {
   ret <4 x float> %res3
 }
 
-define <4 x float> @test_mm_load1_ps(float* %a0) nounwind {
+define <4 x float> @test_mm_load1_ps(ptr %a0) nounwind {
 ; X86-SSE-LABEL: test_mm_load1_ps:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -1328,7 +1328,7 @@ define <4 x float> @test_mm_load1_ps(float* %a0) nounwind {
 ; X64-AVX512:       # %bb.0:
 ; X64-AVX512-NEXT:    vbroadcastss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %ld = load float, float* %a0, align 4
+  %ld = load float, ptr %a0, align 4
   %res0 = insertelement <4 x float> undef, float %ld, i32 0
   %res1 = insertelement <4 x float> %res0, float %ld, i32 1
   %res2 = insertelement <4 x float> %res1, float %ld, i32 2
@@ -1336,7 +1336,7 @@ define <4 x float> @test_mm_load1_ps(float* %a0) nounwind {
   ret <4 x float> %res3
 }
 
-define <4 x float> @test_mm_loadh_pi(<4 x float> %a0, x86_mmx* %a1) {
+define <4 x float> @test_mm_loadh_pi(<4 x float> %a0, ptr %a1) {
 ; X86-SSE-LABEL: test_mm_loadh_pi:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -1375,14 +1375,14 @@ define <4 x float> @test_mm_loadh_pi(<4 x float> %a0, x86_mmx* %a1) {
 ; X64-AVX512-NEXT:    vmovhps (%rdi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0x07]
 ; X64-AVX512-NEXT:    # xmm0 = xmm0[0,1],mem[0,1]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %ptr = bitcast x86_mmx* %a1 to <2 x float>*
-  %ld  = load <2 x float>, <2 x float>* %ptr
+  %ptr = bitcast ptr %a1 to ptr
+  %ld  = load <2 x float>, ptr %ptr
   %ext = shufflevector <2 x float> %ld, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %res = shufflevector <4 x float> %a0, <4 x float> %ext, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   ret <4 x float> %res
 }
 
-define <4 x float> @test_mm_loadl_pi(<4 x float> %a0, x86_mmx* %a1) {
+define <4 x float> @test_mm_loadl_pi(<4 x float> %a0, ptr %a1) {
 ; X86-SSE-LABEL: test_mm_loadl_pi:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -1421,14 +1421,14 @@ define <4 x float> @test_mm_loadl_pi(<4 x float> %a0, x86_mmx* %a1) {
 ; X64-AVX512-NEXT:    vmovlps (%rdi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x12,0x07]
 ; X64-AVX512-NEXT:    # xmm0 = mem[0,1],xmm0[2,3]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %ptr = bitcast x86_mmx* %a1 to <2 x float>*
-  %ld  = load <2 x float>, <2 x float>* %ptr
+  %ptr = bitcast ptr %a1 to ptr
+  %ld  = load <2 x float>, ptr %ptr
   %ext = shufflevector <2 x float> %ld, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %res = shufflevector <4 x float> %a0, <4 x float> %ext, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
   ret <4 x float> %res
 }
 
-define <4 x float> @test_mm_loadr_ps(float* %a0) nounwind {
+define <4 x float> @test_mm_loadr_ps(ptr %a0) nounwind {
 ; X86-SSE-LABEL: test_mm_loadr_ps:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -1469,13 +1469,13 @@ define <4 x float> @test_mm_loadr_ps(float* %a0) nounwind {
 ; X64-AVX512-NEXT:    vpermilps $27, (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x04,0x07,0x1b]
 ; X64-AVX512-NEXT:    # xmm0 = mem[3,2,1,0]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %arg0 = bitcast float* %a0 to <4 x float>*
-  %ld = load <4 x float>, <4 x float>* %arg0, align 16
+  %arg0 = bitcast ptr %a0 to ptr
+  %ld = load <4 x float>, ptr %arg0, align 16
   %res = shufflevector <4 x float> %ld, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   ret <4 x float> %res
 }
 
-define <4 x float> @test_mm_loadu_ps(float* %a0) nounwind {
+define <4 x float> @test_mm_loadu_ps(ptr %a0) nounwind {
 ; X86-SSE-LABEL: test_mm_loadu_ps:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -1508,8 +1508,8 @@ define <4 x float> @test_mm_loadu_ps(float* %a0) nounwind {
 ; X64-AVX512:       # %bb.0:
 ; X64-AVX512-NEXT:    vmovups (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %arg0 = bitcast float* %a0 to <4 x float>*
-  %res = load <4 x float>, <4 x float>* %arg0, align 1
+  %arg0 = bitcast ptr %a0 to ptr
+  %res = load <4 x float>, ptr %arg0, align 1
   ret <4 x float> %res
 }
 
@@ -1731,7 +1731,7 @@ define <4 x float> @test_mm_or_ps(<4 x float> %a0, <4 x float> %a1) nounwind {
   ret <4 x float> %bc
 }
 
-define void @test_mm_prefetch(i8* %a0) {
+define void @test_mm_prefetch(ptr %a0) {
 ; X86-LABEL: test_mm_prefetch:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -1742,10 +1742,10 @@ define void @test_mm_prefetch(i8* %a0) {
 ; X64:       # %bb.0:
 ; X64-NEXT:    prefetchnta (%rdi) # encoding: [0x0f,0x18,0x07]
 ; X64-NEXT:    retq # encoding: [0xc3]
-  call void @llvm.prefetch(i8* %a0, i32 0, i32 0, i32 1)
+  call void @llvm.prefetch(ptr %a0, i32 0, i32 0, i32 1)
   ret void
 }
-declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) nounwind readnone
+declare void @llvm.prefetch(ptr nocapture, i32, i32, i32) nounwind readnone
 
 define <4 x float> @test_mm_rcp_ps(<4 x float> %a0) {
 ; SSE-LABEL: test_mm_rcp_ps:
@@ -1862,16 +1862,16 @@ define void @test_MM_SET_EXCEPTION_MASK(i32 %a0) nounwind {
 ; X64-AVX-NEXT:    vldmxcsr (%rax) # encoding: [0xc5,0xf8,0xae,0x10]
 ; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %1 = alloca i32, align 4
-  %2 = bitcast i32* %1 to i8*
-  call void @llvm.x86.sse.stmxcsr(i8* %2)
-  %3 = load i32, i32* %1
+  %2 = bitcast ptr %1 to ptr
+  call void @llvm.x86.sse.stmxcsr(ptr %2)
+  %3 = load i32, ptr %1
   %4 = and i32 %3, -8065
   %5 = or i32 %4, %a0
-  store i32 %5, i32* %1
-  call void @llvm.x86.sse.ldmxcsr(i8* %2)
+  store i32 %5, ptr %1
+  call void @llvm.x86.sse.ldmxcsr(ptr %2)
   ret void
 }
-declare void @llvm.x86.sse.ldmxcsr(i8*) nounwind readnone
+declare void @llvm.x86.sse.ldmxcsr(ptr) nounwind readnone
 
 define void @test_MM_SET_EXCEPTION_STATE(i32 %a0) nounwind {
 ; X86-SSE-LABEL: test_MM_SET_EXCEPTION_STATE:
@@ -1924,13 +1924,13 @@ define void @test_MM_SET_EXCEPTION_STATE(i32 %a0) nounwind {
 ; X64-AVX-NEXT:    vldmxcsr (%rax) # encoding: [0xc5,0xf8,0xae,0x10]
 ; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %1 = alloca i32, align 4
-  %2 = bitcast i32* %1 to i8*
-  call void @llvm.x86.sse.stmxcsr(i8* %2)
-  %3 = load i32, i32* %1
+  %2 = bitcast ptr %1 to ptr
+  call void @llvm.x86.sse.stmxcsr(ptr %2)
+  %3 = load i32, ptr %1
   %4 = and i32 %3, -64
   %5 = or i32 %4, %a0
-  store i32 %5, i32* %1
-  call void @llvm.x86.sse.ldmxcsr(i8* %2)
+  store i32 %5, ptr %1
+  call void @llvm.x86.sse.ldmxcsr(ptr %2)
   ret void
 }
 
@@ -1989,13 +1989,13 @@ define void @test_MM_SET_FLUSH_ZERO_MODE(i32 %a0) nounwind {
 ; X64-AVX-NEXT:    vldmxcsr (%rax) # encoding: [0xc5,0xf8,0xae,0x10]
 ; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %1 = alloca i32, align 4
-  %2 = bitcast i32* %1 to i8*
-  call void @llvm.x86.sse.stmxcsr(i8* %2)
-  %3 = load i32, i32* %1
+  %2 = bitcast ptr %1 to ptr
+  call void @llvm.x86.sse.stmxcsr(ptr %2)
+  %3 = load i32, ptr %1
   %4 = and i32 %3, -32769
   %5 = or i32 %4, %a0
-  store i32 %5, i32* %1
-  call void @llvm.x86.sse.ldmxcsr(i8* %2)
+  store i32 %5, ptr %1
+  call void @llvm.x86.sse.ldmxcsr(ptr %2)
   ret void
 }
 
@@ -2193,13 +2193,13 @@ define void @test_MM_SET_ROUNDING_MODE(i32 %a0) nounwind {
 ; X64-AVX-NEXT:    vldmxcsr (%rax) # encoding: [0xc5,0xf8,0xae,0x10]
 ; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %1 = alloca i32, align 4
-  %2 = bitcast i32* %1 to i8*
-  call void @llvm.x86.sse.stmxcsr(i8* %2)
-  %3 = load i32, i32* %1
+  %2 = bitcast ptr %1 to ptr
+  call void @llvm.x86.sse.stmxcsr(ptr %2)
+  %3 = load i32, ptr %1
   %4 = and i32 %3, -24577
   %5 = or i32 %4, %a0
-  store i32 %5, i32* %1
-  call void @llvm.x86.sse.ldmxcsr(i8* %2)
+  store i32 %5, ptr %1
+  call void @llvm.x86.sse.ldmxcsr(ptr %2)
   ret void
 }
 
@@ -2326,9 +2326,9 @@ define void @test_mm_setcsr(i32 %a0) nounwind {
 ; X64-AVX-NEXT:    vldmxcsr (%rax) # encoding: [0xc5,0xf8,0xae,0x10]
 ; X64-AVX-NEXT:    retq # encoding: [0xc3]
   %st = alloca i32, align 4
-  store i32 %a0, i32* %st, align 4
-  %bc = bitcast i32* %st to i8*
-  call void @llvm.x86.sse.ldmxcsr(i8* %bc)
+  store i32 %a0, ptr %st, align 4
+  %bc = bitcast ptr %st to ptr
+  call void @llvm.x86.sse.ldmxcsr(ptr %bc)
   ret void
 }
 
@@ -2573,7 +2573,7 @@ define float @test_mm_sqrt_ss_scalar(float %a0) {
   ret float %sqrt
 }
 
-define void @test_mm_store_ps(float *%a0, <4 x float> %a1) {
+define void @test_mm_store_ps(ptr %a0, <4 x float> %a1) {
 ; X86-SSE-LABEL: test_mm_store_ps:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -2606,12 +2606,12 @@ define void @test_mm_store_ps(float *%a0, <4 x float> %a1) {
 ; X64-AVX512:       # %bb.0:
 ; X64-AVX512-NEXT:    vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %arg0 = bitcast float* %a0 to <4 x float>*
-  store <4 x float> %a1, <4 x float>* %arg0, align 16
+  %arg0 = bitcast ptr %a0 to ptr
+  store <4 x float> %a1, ptr %arg0, align 16
   ret void
 }
 
-define void @test_mm_store_ps1(float *%a0, <4 x float> %a1) {
+define void @test_mm_store_ps1(ptr %a0, <4 x float> %a1) {
 ; X86-SSE-LABEL: test_mm_store_ps1:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -2654,13 +2654,13 @@ define void @test_mm_store_ps1(float *%a0, <4 x float> %a1) {
 ; X64-AVX512-NEXT:    vbroadcastss %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xc0]
 ; X64-AVX512-NEXT:    vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %arg0 = bitcast float* %a0 to <4 x float>*
+  %arg0 = bitcast ptr %a0 to ptr
   %shuf = shufflevector <4 x float> %a1, <4 x float> undef, <4 x i32> zeroinitializer
-  store <4 x float> %shuf, <4 x float>* %arg0, align 16
+  store <4 x float> %shuf, ptr %arg0, align 16
   ret void
 }
 
-define void @test_mm_store_ss(float *%a0, <4 x float> %a1) {
+define void @test_mm_store_ss(ptr %a0, <4 x float> %a1) {
 ; X86-SSE-LABEL: test_mm_store_ss:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -2694,11 +2694,11 @@ define void @test_mm_store_ss(float *%a0, <4 x float> %a1) {
 ; X64-AVX512-NEXT:    vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
   %ext = extractelement <4 x float> %a1, i32 0
-  store float %ext, float* %a0, align 1
+  store float %ext, ptr %a0, align 1
   ret void
 }
 
-define void @test_mm_store1_ps(float *%a0, <4 x float> %a1) {
+define void @test_mm_store1_ps(ptr %a0, <4 x float> %a1) {
 ; X86-SSE-LABEL: test_mm_store1_ps:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -2741,13 +2741,13 @@ define void @test_mm_store1_ps(float *%a0, <4 x float> %a1) {
 ; X64-AVX512-NEXT:    vbroadcastss %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xc0]
 ; X64-AVX512-NEXT:    vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %arg0 = bitcast float* %a0 to <4 x float>*
+  %arg0 = bitcast ptr %a0 to ptr
   %shuf = shufflevector <4 x float> %a1, <4 x float> undef, <4 x i32> zeroinitializer
-  store <4 x float> %shuf, <4 x float>* %arg0, align 16
+  store <4 x float> %shuf, ptr %arg0, align 16
   ret void
 }
 
-define void @test_mm_storeh_pi(x86_mmx *%a0, <4 x float> %a1) nounwind {
+define void @test_mm_storeh_pi(ptr %a0, <4 x float> %a1) nounwind {
 ; X86-SSE1-LABEL: test_mm_storeh_pi:
 ; X86-SSE1:       # %bb.0:
 ; X86-SSE1-NEXT:    pushl %ebp # encoding: [0x55]
@@ -2808,14 +2808,14 @@ define void @test_mm_storeh_pi(x86_mmx *%a0, <4 x float> %a1) nounwind {
 ; X64-AVX512-NEXT:    vpextrq $1, %xmm0, %rax # EVEX TO VEX Compression encoding: [0xc4,0xe3,0xf9,0x16,0xc0,0x01]
 ; X64-AVX512-NEXT:    movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %ptr = bitcast x86_mmx* %a0 to i64*
+  %ptr = bitcast ptr %a0 to ptr
   %bc  = bitcast <4 x float> %a1 to <2 x i64>
   %ext = extractelement <2 x i64> %bc, i32 1
-  store i64 %ext, i64* %ptr
+  store i64 %ext, ptr %ptr
   ret void
 }
 
-define void @test_mm_storeh_pi2(x86_mmx *%a0, <4 x float> %a1) nounwind {
+define void @test_mm_storeh_pi2(ptr %a0, <4 x float> %a1) nounwind {
 ; X86-SSE-LABEL: test_mm_storeh_pi2:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -2848,13 +2848,13 @@ define void @test_mm_storeh_pi2(x86_mmx *%a0, <4 x float> %a1) nounwind {
 ; X64-AVX512:       # %bb.0:
 ; X64-AVX512-NEXT:    vmovhps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x17,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %ptr = bitcast x86_mmx* %a0 to <2 x float>*
+  %ptr = bitcast ptr %a0 to ptr
   %ext = shufflevector <4 x float> %a1, <4 x float> undef, <2 x i32> <i32 2, i32 3>
-  store <2 x float> %ext, <2 x float>* %ptr
+  store <2 x float> %ext, ptr %ptr
   ret void
 }
 
-define void @test_mm_storel_pi(x86_mmx *%a0, <4 x float> %a1) nounwind {
+define void @test_mm_storel_pi(ptr %a0, <4 x float> %a1) nounwind {
 ; X86-SSE1-LABEL: test_mm_storel_pi:
 ; X86-SSE1:       # %bb.0:
 ; X86-SSE1-NEXT:    pushl %ebp # encoding: [0x55]
@@ -2913,15 +2913,15 @@ define void @test_mm_storel_pi(x86_mmx *%a0, <4 x float> %a1) nounwind {
 ; X64-AVX512-NEXT:    vmovq %xmm0, %rax # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
 ; X64-AVX512-NEXT:    movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %ptr = bitcast x86_mmx* %a0 to i64*
+  %ptr = bitcast ptr %a0 to ptr
   %bc  = bitcast <4 x float> %a1 to <2 x i64>
   %ext = extractelement <2 x i64> %bc, i32 0
-  store i64 %ext, i64* %ptr
+  store i64 %ext, ptr %ptr
   ret void
 }
 
 ; FIXME: Switch the frontend to use this code.
-define void @test_mm_storel_pi2(x86_mmx *%a0, <4 x float> %a1) nounwind {
+define void @test_mm_storel_pi2(ptr %a0, <4 x float> %a1) nounwind {
 ; X86-SSE-LABEL: test_mm_storel_pi2:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -2954,13 +2954,13 @@ define void @test_mm_storel_pi2(x86_mmx *%a0, <4 x float> %a1) nounwind {
 ; X64-AVX512:       # %bb.0:
 ; X64-AVX512-NEXT:    vmovlps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x13,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %ptr = bitcast x86_mmx* %a0 to <2 x float>*
+  %ptr = bitcast ptr %a0 to ptr
   %ext = shufflevector <4 x float> %a1, <4 x float> undef, <2 x i32> <i32 0, i32 1>
-  store <2 x float> %ext, <2 x float>* %ptr
+  store <2 x float> %ext, ptr %ptr
   ret void
 }
 
-define void @test_mm_storer_ps(float *%a0, <4 x float> %a1) {
+define void @test_mm_storer_ps(ptr %a0, <4 x float> %a1) {
 ; X86-SSE-LABEL: test_mm_storer_ps:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -3005,13 +3005,13 @@ define void @test_mm_storer_ps(float *%a0, <4 x float> %a1) {
 ; X64-AVX512-NEXT:    # xmm0 = xmm0[3,2,1,0]
 ; X64-AVX512-NEXT:    vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %arg0 = bitcast float* %a0 to <4 x float>*
+  %arg0 = bitcast ptr %a0 to ptr
   %shuf = shufflevector <4 x float> %a1, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-  store <4 x float> %shuf, <4 x float>* %arg0, align 16
+  store <4 x float> %shuf, ptr %arg0, align 16
   ret void
 }
 
-define void @test_mm_storeu_ps(float *%a0, <4 x float> %a1) {
+define void @test_mm_storeu_ps(ptr %a0, <4 x float> %a1) {
 ; X86-SSE-LABEL: test_mm_storeu_ps:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -3044,12 +3044,12 @@ define void @test_mm_storeu_ps(float *%a0, <4 x float> %a1) {
 ; X64-AVX512:       # %bb.0:
 ; X64-AVX512-NEXT:    vmovups %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %arg0 = bitcast float* %a0 to <4 x float>*
-  store <4 x float> %a1, <4 x float>* %arg0, align 1
+  %arg0 = bitcast ptr %a0 to ptr
+  store <4 x float> %a1, ptr %arg0, align 1
   ret void
 }
 
-define void @test_mm_stream_ps(float *%a0, <4 x float> %a1) {
+define void @test_mm_stream_ps(ptr %a0, <4 x float> %a1) {
 ; X86-SSE-LABEL: test_mm_stream_ps:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -3082,8 +3082,8 @@ define void @test_mm_stream_ps(float *%a0, <4 x float> %a1) {
 ; X64-AVX512:       # %bb.0:
 ; X64-AVX512-NEXT:    vmovntps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2b,0x07]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %arg0 = bitcast float* %a0 to <4 x float>*
-  store <4 x float> %a1, <4 x float>* %arg0, align 16, !nontemporal !0
+  %arg0 = bitcast ptr %a0 to ptr
+  store <4 x float> %a1, ptr %arg0, align 16, !nontemporal !0
   ret void
 }
 
@@ -3128,7 +3128,7 @@ define <4 x float> @test_mm_sub_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
   ret <4 x float> %res
 }
 
-define void @test_MM_TRANSPOSE4_PS(<4 x float>* %a0, <4 x float>* %a1, <4 x float>* %a2, <4 x float>* %a3) nounwind {
+define void @test_MM_TRANSPOSE4_PS(ptr %a0, ptr %a1, ptr %a2, ptr %a3) nounwind {
 ; X86-SSE-LABEL: test_MM_TRANSPOSE4_PS:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    pushl %esi # encoding: [0x56]
@@ -3322,10 +3322,10 @@ define void @test_MM_TRANSPOSE4_PS(<4 x float>* %a0, <4 x float>* %a1, <4 x floa
 ; X64-AVX512-NEXT:    vmovaps %xmm4, (%rdx) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x22]
 ; X64-AVX512-NEXT:    vmovaps %xmm0, (%rcx) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x01]
 ; X64-AVX512-NEXT:    retq # encoding: [0xc3]
-  %row0 = load <4 x float>, <4 x float>* %a0, align 16
-  %row1 = load <4 x float>, <4 x float>* %a1, align 16
-  %row2 = load <4 x float>, <4 x float>* %a2, align 16
-  %row3 = load <4 x float>, <4 x float>* %a3, align 16
+  %row0 = load <4 x float>, ptr %a0, align 16
+  %row1 = load <4 x float>, ptr %a1, align 16
+  %row2 = load <4 x float>, ptr %a2, align 16
+  %row3 = load <4 x float>, ptr %a3, align 16
   %tmp0 = shufflevector <4 x float> %row0, <4 x float> %row1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
   %tmp2 = shufflevector <4 x float> %row2, <4 x float> %row3, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
   %tmp1 = shufflevector <4 x float> %row0, <4 x float> %row1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -3334,10 +3334,10 @@ define void @test_MM_TRANSPOSE4_PS(<4 x float>* %a0, <4 x float>* %a1, <4 x floa
   %res1 = shufflevector <4 x float> %tmp2, <4 x float> %tmp0, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
   %res2 = shufflevector <4 x float> %tmp1, <4 x float> %tmp3, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   %res3 = shufflevector <4 x float> %tmp3, <4 x float> %tmp1, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
-  store <4 x float> %res0, <4 x float>* %a0, align 16
-  store <4 x float> %res1, <4 x float>* %a1, align 16
-  store <4 x float> %res2, <4 x float>* %a2, align 16
-  store <4 x float> %res3, <4 x float>* %a3, align 16
+  store <4 x float> %res0, ptr %a0, align 16
+  store <4 x float> %res1, ptr %a1, align 16
+  store <4 x float> %res2, ptr %a2, align 16
+  store <4 x float> %res3, ptr %a3, align 16
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/X86/stack-protector-dbginfo.ll b/llvm/test/CodeGen/X86/stack-protector-dbginfo.ll
index 6b262b34acce6..27ca9fd47e085 100644
--- a/llvm/test/CodeGen/X86/stack-protector-dbginfo.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-dbginfo.ll
@@ -12,7 +12,7 @@
 define i32 @_Z18read_response_sizev() #0 !dbg !9 {
 entry:
   tail call void @llvm.dbg.value(metadata !22, i64 0, metadata !23, metadata !DIExpression()), !dbg !39
-  %0 = load i64, ptr getelementptr inbounds ({ i64, [56 x i8] }, ptr @a, i32 0, i32 0), align 8, !dbg !40
+  %0 = load i64, ptr @a, align 8, !dbg !40
   tail call void @llvm.dbg.value(metadata i32 undef, i64 0, metadata !64, metadata !DIExpression()), !dbg !71
   %1 = trunc i64 %0 to i32
   ret i32 %1
@@ -60,7 +60,7 @@ attributes #0 = { sspreq }
 !19 = !DILocalVariable(name: "c", line: 29, scope: !9, file: !10, type: !13)
 !20 = !{}
 !21 = !{i32 2, !"Dwarf Version", i32 2}
-!22 = !{ptr getelementptr inbounds ({ i64, [56 x i8] }, ptr @a, i32 0, i32 0)}
+!22 = !{ptr @a}
 !23 = !DILocalVariable(name: "p2", line: 12, arg: 2, scope: !24, file: !10, type: !32)
 !24 = distinct !DISubprogram(name: "min<unsigned long long>", linkageName: "_ZN3__13minIyEERKT_S3_RS1_", line: 12, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 12, file: !1, scope: !25, type: !27, templateParams: !33, retainedNodes: !35)
 !25 = !DINamespace(name: "__1", scope: null)

diff  --git a/llvm/test/CodeGen/X86/statepoint-cmp-sunk-past-statepoint.ll b/llvm/test/CodeGen/X86/statepoint-cmp-sunk-past-statepoint.ll
index 731cc95114f77..391963de79703 100644
--- a/llvm/test/CodeGen/X86/statepoint-cmp-sunk-past-statepoint.ll
+++ b/llvm/test/CodeGen/X86/statepoint-cmp-sunk-past-statepoint.ll
@@ -5,9 +5,9 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
 target triple = "x86_64-unknown-linux-gnu"
 
 declare void @foo() gc "statepoint-example"
-declare void @bar(i8 addrspace(1)*) gc "statepoint-example"
+declare void @bar(ptr addrspace(1)) gc "statepoint-example"
 
-declare i32* @fake_personality_function()
+declare ptr @fake_personality_function()
 
 ; Simplest possible test demonstrating the problem
 
@@ -29,15 +29,15 @@ declare i32* @fake_personality_function()
 ; CHECK:        RET 0
 ; CHECK:      bb.2
 ; CHECK:        RET 0
-define void @test(i8 addrspace(1)* %a)  gc "statepoint-example" {
+define void @test(ptr addrspace(1) %a)  gc "statepoint-example" {
 entry:
-  %not7 = icmp eq i8 addrspace(1)* %a, null
-  %statepoint_token1745 = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2, i32 5, void ()* nonnull elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) [ "deopt"(), "gc-live"(i8 addrspace(1)* %a) ]
+  %not7 = icmp eq ptr addrspace(1) %a, null
+  %statepoint_token1745 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2, i32 5, ptr nonnull elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) [ "deopt"(), "gc-live"(ptr addrspace(1) %a) ]
   br i1 %not7, label %zero, label %not_zero
 
 not_zero:
-  %a.relocated = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %statepoint_token1745, i32 0, i32 0) ; (%a, %a)
-  %statepoint_token1752 = call token (i64, i32, void (i8 addrspace(1)*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidp1i8f(i64 2, i32 5, void (i8 addrspace(1)*)* nonnull elementtype(void (i8 addrspace(1)*)) @bar, i32 1, i32 0, i8 addrspace(1)* %a.relocated, i32 0, i32 0) [ "deopt"(), "gc-live"() ]
+  %a.relocated = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %statepoint_token1745, i32 0, i32 0) ; (%a, %a)
+  %statepoint_token1752 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2, i32 5, ptr nonnull elementtype(void (ptr addrspace(1))) @bar, i32 1, i32 0, ptr addrspace(1) %a.relocated, i32 0, i32 0) [ "deopt"(), "gc-live"() ]
   ret void
 
 zero:
@@ -66,31 +66,31 @@ zero:
 ; CHECK:        TEST64rr killed %1, %1, implicit-def $eflags
 ; CHECK:        JCC_1 %bb.1, 5, implicit killed $eflags
 ; CHECK:        JMP_1 %bb.6
-define void @test2(i8 addrspace(1)* %this, i32 %0, i32 addrspace(1)* %p0, i8 addrspace(1)* %p1) gc "statepoint-example" personality i32* ()* @fake_personality_function {
+define void @test2(ptr addrspace(1) %this, i32 %0, ptr addrspace(1) %p0, ptr addrspace(1) %p1) gc "statepoint-example" personality ptr @fake_personality_function {
 preheader:
   br label %loop.head
 
 loop.head:
-  %phi1 = phi i32 addrspace(1)* [ %p0, %preheader ], [ %addr.i.i46797.remat64523, %tail ]
-  %v1 = phi i8 addrspace(1)* [ %p1, %preheader ], [ %v3, %tail ]
-  %not3= icmp ne i32 addrspace(1)* %phi1, null
+  %phi1 = phi ptr addrspace(1) [ %p0, %preheader ], [ %addr.i.i46797.remat64523, %tail ]
+  %v1 = phi ptr addrspace(1) [ %p1, %preheader ], [ %v3, %tail ]
+  %not3= icmp ne ptr addrspace(1) %phi1, null
   br i1 %not3, label %BB1, label %BB3
 
 BB3:
-  %token1 = call token (i64, i32, i8 addrspace(1)* ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_p1i8f(i64 2882400000, i32 0, i8 addrspace(1)* ()* elementtype(i8 addrspace(1)* ()) undef, i32 0, i32 0, i32 0, i32 0) [ "deopt"(), "gc-live"(i8 addrspace(1)* %v1) ]
-  %v2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %token1, i32 0, i32 0) ; (%v1, %v1)
-  %cond = icmp eq i8 addrspace(1)* null, %v2
-  %token2 = invoke token (i64, i32, i8 addrspace(1)* ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_p1i8f(i64 2882400000, i32 0, i8 addrspace(1)* ()* elementtype(i8 addrspace(1)* ()) undef, i32 0, i32 0, i32 0, i32 0) [ "deopt"(), "gc-live"(i8 addrspace(1)* %v2, i32 addrspace(1)* %phi1) ]
+  %token1 = call token (i64, i32, ptr addrspace(1) ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr addrspace(1) ()* elementtype(ptr addrspace(1) ()) undef, i32 0, i32 0, i32 0, i32 0) [ "deopt"(), "gc-live"(ptr addrspace(1) %v1) ]
+  %v2 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token1, i32 0, i32 0) ; (%v1, %v1)
+  %cond = icmp eq ptr addrspace(1) null, %v2
+  %token2 = invoke token (i64, i32, ptr addrspace(1) ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr addrspace(1) ()* elementtype(ptr addrspace(1) ()) undef, i32 0, i32 0, i32 0, i32 0) [ "deopt"(), "gc-live"(ptr addrspace(1) %v2, ptr addrspace(1) %phi1) ]
           to label %BB2 unwind label %BB6
 
 BB2:
-  %v3 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %token2, i32 0, i32 0) ; (%v2, %v2)
-  %.remat64522 = getelementptr inbounds i8, i8 addrspace(1)* %v3, i64 8
-  %addr.i.i46797.remat64523 = bitcast i8 addrspace(1)* %.remat64522 to i32 addrspace(1)*
+  %v3 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token2, i32 0, i32 0) ; (%v2, %v2)
+  %.remat64522 = getelementptr inbounds i8, ptr addrspace(1) %v3, i64 8
+  %addr.i.i46797.remat64523 = bitcast ptr addrspace(1) %.remat64522 to ptr addrspace(1)
   br i1 undef, label %BB4, label %tail
 
 BB4:
-  %dummy = ptrtoint i64* undef to i64
+  %dummy = ptrtoint ptr undef to i64
   br label %tail
 
 tail:
@@ -107,7 +107,5 @@ BB6:
 }
 
 
-declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token, i32 immarg, i32 immarg) #5
-declare token @llvm.experimental.gc.statepoint.p0f_p1i8f(i64 immarg, i32 immarg, i8 addrspace(1)* ()*, i32 immarg, i32 immarg, ...)
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 immarg, i32 immarg, void ()*, i32 immarg, i32 immarg, ...)
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidp1i8f(i64 immarg, i32 immarg, void (i8 addrspace(1)*)*, i32 immarg, i32 immarg, ...)
+declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32 immarg, i32 immarg) #5
+declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, ptr addrspace(1) ()*, i32 immarg, i32 immarg, ...)

diff  --git a/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll b/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
index 937cc173e7fae..9cd3731518120 100644
--- a/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
+++ b/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
 
 ; Function Attrs: nounwind uwtable
-define void @tail_dup_merge_loops(i32 %a, i8* %b, i8* %c) local_unnamed_addr #0 {
+define void @tail_dup_merge_loops(i32 %a, ptr %b, ptr %c) local_unnamed_addr #0 {
 ; CHECK-LABEL: tail_dup_merge_loops:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    jmp .LBB0_1
@@ -34,15 +34,15 @@ entry:
   br label %outer_loop_top
 
 outer_loop_top:                         ; preds = %inner_loop_exit, %entry
-  %dst.0.ph.i = phi i8* [ %b, %entry ], [ %scevgep679.i, %inner_loop_exit ]
+  %dst.0.ph.i = phi ptr [ %b, %entry ], [ %scevgep679.i, %inner_loop_exit ]
   br i1 %notlhs674.i, label %exit, label %inner_loop_preheader
 
 inner_loop_preheader:                           ; preds = %outer_loop_top
   br label %inner_loop_top
 
 inner_loop_top:                                     ; preds = %inner_loop_latch, %inner_loop_preheader
-  %dst.0.i = phi i8* [ %inc, %inner_loop_latch ], [ %dst.0.ph.i, %inner_loop_preheader ]
-  %var = load i8, i8* %dst.0.i
+  %dst.0.i = phi ptr [ %inc, %inner_loop_latch ], [ %dst.0.ph.i, %inner_loop_preheader ]
+  %var = load i8, ptr %dst.0.i
   %tobool1.i = icmp slt i8 %var, 0
   br label %inner_loop_test
 
@@ -50,13 +50,13 @@ inner_loop_test:                                       ; preds = %inner_loop_top
   br i1 %tobool1.i, label %inner_loop_exit, label %inner_loop_latch
 
 inner_loop_exit:                       ; preds = %inner_loop_test
-  %scevgep.i = getelementptr i8, i8* %dst.0.i, i64 1
-  %scevgep679.i = getelementptr i8, i8* %scevgep.i, i64 0
+  %scevgep.i = getelementptr i8, ptr %dst.0.i, i64 1
+  %scevgep679.i = getelementptr i8, ptr %scevgep.i, i64 0
   br label %outer_loop_top
 
 inner_loop_latch:                                ; preds = %inner_loop_test
-  %cmp75.i = icmp ult i8* %dst.0.i, %c
-  %inc = getelementptr i8, i8* %dst.0.i, i64 2
+  %cmp75.i = icmp ult ptr %dst.0.i, %c
+  %inc = getelementptr i8, ptr %dst.0.i, i64 2
   br label %inner_loop_top
 
 exit:                              ; preds = %outer_loop_top
@@ -85,7 +85,7 @@ exit:                              ; preds = %outer_loop_top
 ; The rest of the blocks in the function are noise, unfortunately. Bugpoint
 ; couldn't shrink the test any further.
 
-define i32 @loop_shared_header(i8* %exe, i32 %exesz, i32 %headsize, i32 %min, i32 %wwprva, i32 %e_lfanew, i8* readonly %wwp, i32 %wwpsz, i16 zeroext %sects) local_unnamed_addr #0 {
+define i32 @loop_shared_header(ptr %exe, i32 %exesz, i32 %headsize, i32 %min, i32 %wwprva, i32 %e_lfanew, ptr readonly %wwp, i32 %wwpsz, i16 zeroext %sects) local_unnamed_addr #0 {
 ; CHECK-LABEL: loop_shared_header:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    pushq %rbp
@@ -202,23 +202,23 @@ define i32 @loop_shared_header(i8* %exe, i32 %exesz, i32 %headsize, i32 %min, i3
 ; CHECK-NEXT:  .LBB1_25: # %wunpsect.exit.thread.loopexit389
 ; CHECK-NEXT:  .LBB1_26: # %wunpsect.exit.thread.loopexit391
 entry:
-  %0 = load i32, i32* undef, align 4
+  %0 = load i32, ptr undef, align 4
   %mul = shl nsw i32 %0, 2
   br i1 undef, label %if.end19, label %cleanup
 
 if.end19:                                         ; preds = %entry
   %conv = zext i32 %mul to i64
-  %call = tail call i8* @cli_calloc(i64 %conv, i64 1)
+  %call = tail call ptr @cli_calloc(i64 %conv, i64 1)
   %1 = icmp eq i32 %exesz, 0
   %notrhs = icmp eq i32 %0, 0
   %or.cond117.not = or i1 %1, %notrhs
   %or.cond202 = or i1 %or.cond117.not, undef
-  %cmp35 = icmp ult i8* %call, %exe
+  %cmp35 = icmp ult ptr %call, %exe
   %or.cond203 = or i1 %or.cond202, %cmp35
   br i1 %or.cond203, label %cleanup, label %if.end50
 
 if.end50:                                         ; preds = %if.end19
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %call, i8* undef, i64 %conv, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr nonnull %call, ptr undef, i64 %conv, i1 false)
   %cmp1.i.i = icmp ugt i32 %mul, 3
   br i1 %cmp1.i.i, label %shared_preheader, label %wunpsect.exit.thread.loopexit391
 
@@ -227,7 +227,7 @@ shared_preheader:                                 ; preds = %if.end50
 
 outer_loop_header:                                ; preds = %outer_loop_latch, %shared_preheader
   %bits.1.i = phi i8 [ 32, %shared_preheader ], [ %bits.43.i, %outer_loop_latch ]
-  %dst.0.ph.i = phi i8* [ undef, %shared_preheader ], [ %scevgep679.i, %outer_loop_latch ]
+  %dst.0.ph.i = phi ptr [ undef, %shared_preheader ], [ %scevgep679.i, %outer_loop_latch ]
   %2 = icmp eq i32 %0, 0
   br i1 %2, label %while.cond.us1412.i, label %shared_loop_header
 
@@ -241,8 +241,8 @@ if.end41.us1436.i:                                ; preds = %while.cond.us1412.i
   unreachable
 
 shared_loop_header:                               ; preds = %dup_early2, %dup_early1
-  %dst.0.i = phi i8* [ undef, %inner_loop_body ], [ %dst.0.ph.i, %outer_loop_header ], [ undef, %dead_block ]
-  %cmp3.i1172.i = icmp ult i8* null, %call
+  %dst.0.i = phi ptr [ undef, %inner_loop_body ], [ %dst.0.ph.i, %outer_loop_header ], [ undef, %dead_block ]
+  %cmp3.i1172.i = icmp ult ptr null, %call
   br i1 %cmp3.i1172.i, label %wunpsect.exit.thread.loopexit389, label %inner_loop_body
 
 inner_loop_body:                                  ; preds = %shared_loop_header
@@ -250,7 +250,7 @@ inner_loop_body:                                  ; preds = %shared_loop_header
   br i1 %3, label %if.end96.i, label %shared_loop_header
 
 dead_block:                                       ; preds = %inner_loop_body
-  %cmp75.i = icmp ult i8* %dst.0.i, null
+  %cmp75.i = icmp ult ptr %dst.0.i, null
   br label %shared_loop_header
 
 if.end96.i:                                       ; preds = %inner_loop_body
@@ -258,7 +258,7 @@ if.end96.i:                                       ; preds = %inner_loop_body
   br i1 %cmp97.i, label %if.then99.i, label %if.end287.i
 
 if.then99.i:                                      ; preds = %if.end96.i
-  tail call void (i8*, ...) @cli_dbgmsg(i8* getelementptr inbounds ([23 x i8], [23 x i8]* @.str.6, i64 0, i64 0), i32 undef)
+  tail call void (ptr, ...) @cli_dbgmsg(ptr @.str.6, i32 undef)
   br label %cleanup
 
 if.end287.i:                                      ; preds = %if.end96.i
@@ -270,7 +270,7 @@ if.end308.i:                                      ; preds = %if.end287.i
   br i1 undef, label %if.end335.i, label %merge_predecessor_split
 
 merge_predecessor_split:                          ; preds = %if.end308.i
-  %4 = bitcast i8* undef to i32*
+  %4 = bitcast ptr undef to ptr
   br label %outer_loop_latch
 
 if.end335.i:                                      ; preds = %if.end308.i
@@ -284,8 +284,8 @@ outer_loop_latch:                                 ; preds = %merge_other, %if.en
   %backsize.0.i = phi i16 [ %conv294.i, %if.end287.i ], [ 0, %merge_other ], [ 0, %merge_predecessor_split ], [ 0, %if.end335.i ]
   %5 = add i16 %backsize.0.i, -1
   %6 = zext i16 %5 to i64
-  %scevgep.i = getelementptr i8, i8* %dst.0.ph.i, i64 1
-  %scevgep679.i = getelementptr i8, i8* %scevgep.i, i64 %6
+  %scevgep.i = getelementptr i8, ptr %dst.0.ph.i, i64 1
+  %scevgep679.i = getelementptr i8, ptr %scevgep.i, i64 %6
   br label %outer_loop_header
 
 wunpsect.exit.thread.loopexit389:                 ; preds = %shared_loop_header
@@ -300,12 +300,12 @@ cleanup:                                          ; preds = %if.then99.i, %while
 }
 
 ; Function Attrs: nounwind
-declare void @cli_dbgmsg(i8*, ...) local_unnamed_addr #0
+declare void @cli_dbgmsg(ptr, ...) local_unnamed_addr #0
 
 ; Function Attrs: nounwind
-declare i8* @cli_calloc(i64, i64) local_unnamed_addr #0
+declare ptr @cli_calloc(i64, i64) local_unnamed_addr #0
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/X86/tailcc-dwarf.ll b/llvm/test/CodeGen/X86/tailcc-dwarf.ll
index 2676a091081ee..524d99d711bc4 100644
--- a/llvm/test/CodeGen/X86/tailcc-dwarf.ll
+++ b/llvm/test/CodeGen/X86/tailcc-dwarf.ll
@@ -1,25 +1,25 @@
 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -O0 --frame-pointer=non-leaf %s -o - | FileCheck %s
 
-%block = type { %blockheader, [0 x i64*] }
+%block = type { %blockheader, [0 x ptr] }
 %blockheader = type { i64 }
 
 define void @scanStackRoots(i32) {
   ret void
 }
 
-define i32 @main(i32 %argc, i8** %argv) {
+define i32 @main(i32 %argc, ptr %argv) {
 entry:
-  %0 = call tailcc %block* @apply_rule_6870(%block* null, %block* null)
+  %0 = call tailcc ptr @apply_rule_6870(ptr null, ptr null)
   ret i32 0
 }
 
-define internal tailcc %block* @apply_rule_6870(%block* %0, %block* %1) {
+define internal tailcc ptr @apply_rule_6870(ptr %0, ptr %1) {
 entry:
-  %2 = tail call tailcc %block* @sender12(%block* %0, %block* %1)
-  ret %block* null
+  %2 = tail call tailcc ptr @sender12(ptr %0, ptr %1)
+  ret ptr null
 }
 
-define internal tailcc %block* @sender12(%block* %0, %block* %1) {
+define internal tailcc ptr @sender12(ptr %0, ptr %1) {
 ; CHECK-LABEL: sender12:
 ; CHECK: .cfi_startproc
 ; CHECK: subq $8160, %rsp
@@ -28,20 +28,20 @@ define internal tailcc %block* @sender12(%block* %0, %block* %1) {
 ; CHECK: .cfi_offset %rbp, -8176
 entry:
   %a = alloca [1024 x i32]
-  %b = load [1024 x i32], [1024 x i32]* %a
+  %b = load [1024 x i32], ptr %a
   call void @scanStackRoots(i32 1)
-  %2 = tail call tailcc %block* @apply_rule_6300(%block* %0, %block* %1, [1024 x i32] %b)
-  ret %block* %2
+  %2 = tail call tailcc ptr @apply_rule_6300(ptr %0, ptr %1, [1024 x i32] %b)
+  ret ptr %2
 }
 
-define internal tailcc %block* @apply_rule_6300(%block* %0, %block* %1, [1024 x i32] %2) {
+define internal tailcc ptr @apply_rule_6300(ptr %0, ptr %1, [1024 x i32] %2) {
 entry:
-  %3 = tail call tailcc %block* @sender4(%block* %0, %block* %1)
-  ret %block* %3
+  %3 = tail call tailcc ptr @sender4(ptr %0, ptr %1)
+  ret ptr %3
 }
 
-define internal tailcc %block* @sender4(%block* %0, %block* %1) {
+define internal tailcc ptr @sender4(ptr %0, ptr %1) {
 entry:
   call void @scanStackRoots(i32 2)
-  ret %block* null
+  ret ptr null
 }

diff  --git a/llvm/test/CodeGen/X86/threadlocal_address.ll b/llvm/test/CodeGen/X86/threadlocal_address.ll
index 82597c08a4bb0..6c6649b6419c3 100644
--- a/llvm/test/CodeGen/X86/threadlocal_address.ll
+++ b/llvm/test/CodeGen/X86/threadlocal_address.ll
@@ -19,7 +19,7 @@ entry:
   ret i32 %3
 }
 
-@j =  thread_local addrspace(1) global  i32 addrspace(0)* @i, align 4
+@j =  thread_local addrspace(1) global  ptr addrspace(0) @i, align 4
 define noundef i32 @bar() {
 ; CHECK: %0:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gottpoff) @j, $noreg :: (load (s64) from got)
 ; CHECK: %1:gr32 = MOV32rm %0, 1, $noreg, 0, $fs :: (load (s32) from %ir.0, addrspace 1)

diff  --git a/llvm/test/CodeGen/X86/win64-byval.ll b/llvm/test/CodeGen/X86/win64-byval.ll
index 676e81155f691..6f48ce160cb6b 100644
--- a/llvm/test/CodeGen/X86/win64-byval.ll
+++ b/llvm/test/CodeGen/X86/win64-byval.ll
@@ -88,12 +88,12 @@ define void @test() {
   ret void
 }
 
-define i64 @receive_byval_arg_via_stack_arg(i64* byval(i64), i64* byval(i64), i64* byval(i64), i64* byval(i64), i64* byval(i64) %x) {
+define i64 @receive_byval_arg_via_stack_arg(ptr byval(i64), ptr byval(i64), ptr byval(i64), ptr byval(i64), ptr byval(i64) %x) {
 ; CHECK-LABEL: receive_byval_arg_via_stack_arg:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; CHECK-NEXT:    movq (%rax), %rax
 ; CHECK-NEXT:    retq
-  %r = load i64, i64* %x
+  %r = load i64, ptr %x
   ret i64 %r
 }

diff  --git a/llvm/test/CodeGen/X86/windows-seh-EHa-CppCatchDotDotDot.ll b/llvm/test/CodeGen/X86/windows-seh-EHa-CppCatchDotDotDot.ll
index 2b09c0133c1db..25d92033cc6bc 100644
--- a/llvm/test/CodeGen/X86/windows-seh-EHa-CppCatchDotDotDot.ll
+++ b/llvm/test/CodeGen/X86/windows-seh-EHa-CppCatchDotDotDot.ll
@@ -46,7 +46,7 @@ source_filename = "windows-seh-EHa-CppCatchDotDotDot.cpp"
 target datalayout = "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-windows-msvc"
 
-%rtti.TypeDescriptor2 = type { i8**, i8*, [3 x i8] }
+%rtti.TypeDescriptor2 = type { ptr, ptr, [3 x i8] }
 %eh.CatchableType = type { i32, i32, i32, i32, i32, i32, i32 }
 %eh.CatchableTypeArray.1 = type { i32, [1 x i32] }
 %eh.ThrowInfo = type { i32, i32, i32, i32 }
@@ -68,17 +68,17 @@ $"??_C@_0N@LJHFFAKD@?5in?5A?5ctor?5?6?$AA@" = comdat any
 
 $"??_C@_0N@HMNCGOCN@?5in?5A?5dtor?5?6?$AA@" = comdat any
 
-@"?pt1@@3PEAHEA" = dso_local global i32* null, align 8
-@"?pt2@@3PEAHEA" = dso_local global i32* null, align 8
-@"?pt3@@3PEAHEA" = dso_local global i32* null, align 8
+@"?pt1@@3PEAHEA" = dso_local global ptr null, align 8
+@"?pt2@@3PEAHEA" = dso_local global ptr null, align 8
+@"?pt3@@3PEAHEA" = dso_local global ptr null, align 8
 @"?g@@3HA" = dso_local global i32 0, align 4
 @"??_C at _0BJ@EIKFKKLB@?5in?5catch?$CI?4?4?4?$CJ?5funclet?5?6?$AA@" = linkonce_odr dso_local unnamed_addr constant [25 x i8] c" in catch(...) funclet \0A\00", comdat, align 1
-@"??_7type_info@@6B@" = external constant i8*
-@"??_R0H at 8" = linkonce_odr global %rtti.TypeDescriptor2 { i8** @"??_7type_info@@6B@", i8* null, [3 x i8] c".H\00" }, comdat
+@"??_7type_info@@6B@" = external constant ptr
+@"??_R0H at 8" = linkonce_odr global %rtti.TypeDescriptor2 { ptr @"??_7type_info@@6B@", ptr null, [3 x i8] c".H\00" }, comdat
 @__ImageBase = external dso_local constant i8
-@"_CT??_R0H at 84" = linkonce_odr unnamed_addr constant %eh.CatchableType { i32 1, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.TypeDescriptor2* @"??_R0H at 8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32), i32 0, i32 -1, i32 0, i32 4, i32 0 }, section ".xdata", comdat
- at _CTA1H = linkonce_odr unnamed_addr constant %eh.CatchableTypeArray.1 { i32 1, [1 x i32] [i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%eh.CatchableType* @"_CT??_R0H at 84" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32)] }, section ".xdata", comdat
- at _TI1H = linkonce_odr unnamed_addr constant %eh.ThrowInfo { i32 0, i32 0, i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%eh.CatchableTypeArray.1* @_CTA1H to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32) }, section ".xdata", comdat
+@"_CT??_R0H at 84" = linkonce_odr unnamed_addr constant %eh.CatchableType { i32 1, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"??_R0H at 8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32), i32 0, i32 -1, i32 0, i32 4, i32 0 }, section ".xdata", comdat
+ at _CTA1H = linkonce_odr unnamed_addr constant %eh.CatchableTypeArray.1 { i32 1, [1 x i32] [i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"_CT??_R0H at 84" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32)] }, section ".xdata", comdat
+ at _TI1H = linkonce_odr unnamed_addr constant %eh.ThrowInfo { i32 0, i32 0, i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @_CTA1H to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32) }, section ".xdata", comdat
 @"??_C at _0CN@MKCAOFNA@?5Test?5CPP?5unwind?3?5in?5except?5hand@" = linkonce_odr dso_local unnamed_addr constant [45 x i8] c" Test CPP unwind: in except handler i = %d \0A\00", comdat, align 1
 @"??_C at _0N@LJHFFAKD@?5in?5A?5ctor?5?6?$AA@" = linkonce_odr dso_local unnamed_addr constant [13 x i8] c" in A ctor \0A\00", comdat, align 1
 @"??_C at _0N@HMNCGOCN@?5in?5A?5dtor?5?6?$AA@" = linkonce_odr dso_local unnamed_addr constant [13 x i8] c" in A dtor \0A\00", comdat, align 1
@@ -86,24 +86,24 @@ $"??_C at _0N@HMNCGOCN@?5in?5A?5dtor?5?6?$AA@" = comdat any
 ; Function Attrs: noinline nounwind optnone
 define dso_local void @"?foo@@YAXXZ"() #0 {
 entry:
-  store volatile i32 0, i32* inttoptr (i64 17 to i32*), align 4
+  store volatile i32 0, ptr inttoptr (i64 17 to ptr), align 4
   ret void
 }
 
 ; Function Attrs: noinline optnone
-define dso_local void @"?crash@@YAXH at Z"(i32 %i) #1 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local void @"?crash@@YAXH at Z"(i32 %i) #1 personality ptr @__CxxFrameHandler3 {
 entry:
   %i.addr = alloca i32, align 4
   %ObjA = alloca %struct.A, align 1
   %tmp = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
-  store i32 %0, i32* @"?g@@3HA", align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
+  store i32 %0, ptr @"?g@@3HA", align 4
   invoke void @llvm.seh.try.begin()
           to label %invoke.cont unwind label %catch.dispatch
 
 invoke.cont:                                      ; preds = %entry
-  %call = invoke %struct.A* @"??0A@?1??crash@@YAXH@Z@QEAA@XZ"(%struct.A* %ObjA)
+  %call = invoke ptr @"??0A@?1??crash@@YAXH@Z@QEAA@XZ"(ptr %ObjA)
           to label %invoke.cont1 unwind label %catch.dispatch
 
 invoke.cont1:                                     ; preds = %invoke.cont
@@ -111,12 +111,12 @@ invoke.cont1:                                     ; preds = %invoke.cont
           to label %invoke.cont2 unwind label %ehcleanup
 
 invoke.cont2:                                     ; preds = %invoke.cont1
-  %1 = load i32, i32* %i.addr, align 4
+  %1 = load i32, ptr %i.addr, align 4
   %cmp = icmp eq i32 %1, 1
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %invoke.cont2
-  store volatile i32 0, i32* inttoptr (i64 17 to i32*), align 4
+  store volatile i32 0, ptr inttoptr (i64 17 to ptr), align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %invoke.cont2
@@ -124,29 +124,29 @@ if.end:                                           ; preds = %if.then, %invoke.co
           to label %invoke.cont3 unwind label %ehcleanup
 
 invoke.cont3:                                     ; preds = %if.end
-  call void @"??1A@?1??crash@@YAXH at Z@QEAA at XZ"(%struct.A* %ObjA) #6
+  call void @"??1A@?1??crash@@YAXH at Z@QEAA at XZ"(ptr %ObjA) #6
   br label %try.cont
 
 ehcleanup:                                        ; preds = %if.end, %invoke.cont1
   %2 = cleanuppad within none []
-  call void @"??1A@?1??crash@@YAXH at Z@QEAA at XZ"(%struct.A* %ObjA) #6 [ "funclet"(token %2) ]
+  call void @"??1A@?1??crash@@YAXH at Z@QEAA at XZ"(ptr %ObjA) #6 [ "funclet"(token %2) ]
   cleanupret from %2 unwind label %catch.dispatch
 
 catch.dispatch:                                   ; preds = %ehcleanup, %invoke.cont, %entry
   %3 = catchswitch within none [label %catch] unwind to caller
 
 catch:                                            ; preds = %catch.dispatch
-  %4 = catchpad within %3 [i8* null, i32 0, i8* null]
-  call void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([25 x i8], [25 x i8]* @"??_C at _0BJ@EIKFKKLB@?5in?5catch?$CI?4?4?4?$CJ?5funclet?5?6?$AA@", i64 0, i64 0)) [ "funclet"(token %4) ]
-  %5 = load i32, i32* %i.addr, align 4
+  %4 = catchpad within %3 [ptr null, i32 0, ptr null]
+  call void (...) @"?printf@@YAXZZ"(ptr @"??_C at _0BJ@EIKFKKLB@?5in?5catch?$CI?4?4?4?$CJ?5funclet?5?6?$AA@") [ "funclet"(token %4) ]
+  %5 = load i32, ptr %i.addr, align 4
   %cmp4 = icmp eq i32 %5, 1
   br i1 %cmp4, label %if.then5, label %if.end6
 
 if.then5:                                         ; preds = %catch
-  %6 = load i32, i32* %i.addr, align 4
-  store i32 %6, i32* %tmp, align 4
-  %7 = bitcast i32* %tmp to i8*
-  call void @_CxxThrowException(i8* %7, %eh.ThrowInfo* @_TI1H) #7 [ "funclet"(token %4) ]
+  %6 = load i32, ptr %i.addr, align 4
+  store i32 %6, ptr %tmp, align 4
+  %7 = bitcast ptr %tmp to ptr
+  call void @_CxxThrowException(ptr %7, ptr @_TI1H) #7 [ "funclet"(token %4) ]
   unreachable
 
 if.end6:                                          ; preds = %catch
@@ -165,25 +165,25 @@ declare dso_local void @llvm.seh.try.begin() #2
 declare dso_local i32 @__CxxFrameHandler3(...)
 
 ; Function Attrs: noinline optnone
-define internal %struct.A* @"??0A@?1??crash@@YAXH@Z@QEAA@XZ"(%struct.A* returned %this) unnamed_addr #1 align 2 {
+define internal ptr @"??0A@?1??crash@@YAXH@Z@QEAA@XZ"(ptr returned %this) unnamed_addr #1 align 2 {
 entry:
-  %retval = alloca %struct.A*, align 8
-  %this.addr = alloca %struct.A*, align 8
-  store %struct.A* %this, %struct.A** %this.addr, align 8
-  %this1 = load %struct.A*, %struct.A** %this.addr, align 8
-  store %struct.A* %this1, %struct.A** %retval, align 8
-  call void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @"??_C at _0N@LJHFFAKD@?5in?5A?5ctor?5?6?$AA@", i64 0, i64 0))
-  %0 = load i32, i32* @"?g@@3HA", align 4
+  %retval = alloca ptr, align 8
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
+  store ptr %this1, ptr %retval, align 8
+  call void (...) @"?printf@@YAXZZ"(ptr @"??_C at _0N@LJHFFAKD@?5in?5A?5ctor?5?6?$AA@")
+  %0 = load i32, ptr @"?g@@3HA", align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  store volatile i32 0, i32* inttoptr (i64 17 to i32*), align 4
+  store volatile i32 0, ptr inttoptr (i64 17 to ptr), align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  %1 = load %struct.A*, %struct.A** %retval, align 8
-  ret %struct.A* %1
+  %1 = load ptr, ptr %retval, align 8
+  ret ptr %1
 }
 
 ; Function Attrs: nounwind readnone
@@ -193,31 +193,31 @@ declare dso_local void @llvm.seh.scope.begin() #3
 declare dso_local void @llvm.seh.scope.end() #3
 
 ; Function Attrs: noinline nounwind optnone
-define internal void @"??1A@?1??crash@@YAXH at Z@QEAA at XZ"(%struct.A* %this) unnamed_addr #0 align 2 {
+define internal void @"??1A@?1??crash@@YAXH at Z@QEAA at XZ"(ptr %this) unnamed_addr #0 align 2 {
 entry:
-  %this.addr = alloca %struct.A*, align 8
-  store %struct.A* %this, %struct.A** %this.addr, align 8
-  %this1 = load %struct.A*, %struct.A** %this.addr, align 8
-  call void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @"??_C at _0N@HMNCGOCN@?5in?5A?5dtor?5?6?$AA@", i64 0, i64 0))
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
+  call void (...) @"?printf@@YAXZZ"(ptr @"??_C at _0N@HMNCGOCN@?5in?5A?5dtor?5?6?$AA@")
   ret void
 }
 
 declare dso_local void @"?printf@@YAXZZ"(...) #4
 
-declare dso_local void @_CxxThrowException(i8*, %eh.ThrowInfo*)
+declare dso_local void @_CxxThrowException(ptr, ptr)
 
 ; Function Attrs: noinline norecurse optnone
-define dso_local i32 @main() #5 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+define dso_local i32 @main() #5 personality ptr @__C_specific_handler {
 entry:
   %retval = alloca i32, align 4
   %i = alloca i32, align 4
   %__exception_code = alloca i32, align 4
-  store i32 0, i32* %retval, align 4
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %retval, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %0 = load i32, i32* %i, align 4
+  %0 = load i32, ptr %i, align 4
   %cmp = icmp slt i32 %0, 2
   br i1 %cmp, label %for.body, label %for.end
 
@@ -226,7 +226,7 @@ for.body:                                         ; preds = %for.cond
           to label %invoke.cont unwind label %catch.dispatch
 
 invoke.cont:                                      ; preds = %for.body
-  %1 = load volatile i32, i32* %i, align 4
+  %1 = load volatile i32, ptr %i, align 4
   invoke void @"?crash@@YAXH at Z"(i32 %1) #8
           to label %invoke.cont1 unwind label %catch.dispatch
 
@@ -238,23 +238,23 @@ catch.dispatch:                                   ; preds = %invoke.cont1, %invo
   %2 = catchswitch within none [label %__except] unwind to caller
 
 __except:                                         ; preds = %catch.dispatch
-  %3 = catchpad within %2 [i8* null]
+  %3 = catchpad within %2 [ptr null]
   catchret from %3 to label %__except3
 
 __except3:                                        ; preds = %__except
   %4 = call i32 @llvm.eh.exceptioncode(token %3)
-  store i32 %4, i32* %__exception_code, align 4
-  %5 = load i32, i32* %i, align 4
-  call void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([45 x i8], [45 x i8]* @"??_C at _0CN@MKCAOFNA@?5Test?5CPP?5unwind?3?5in?5except?5hand@", i64 0, i64 0), i32 %5)
+  store i32 %4, ptr %__exception_code, align 4
+  %5 = load i32, ptr %i, align 4
+  call void (...) @"?printf@@YAXZZ"(ptr @"??_C at _0CN@MKCAOFNA@?5Test?5CPP?5unwind?3?5in?5except?5hand@", i32 %5)
   br label %__try.cont
 
 __try.cont:                                       ; preds = %__except3, %invoke.cont2
   br label %for.inc
 
 for.inc:                                          ; preds = %__try.cont
-  %6 = load i32, i32* %i, align 4
+  %6 = load i32, ptr %i, align 4
   %inc = add nsw i32 %6, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond
 
 invoke.cont2:                                     ; preds = %invoke.cont1

diff  --git a/llvm/test/CodeGen/X86/windows-seh-EHa-CppCondiTemps.ll b/llvm/test/CodeGen/X86/windows-seh-EHa-CppCondiTemps.ll
index 89528d096c43c..cc100c2965730 100644
--- a/llvm/test/CodeGen/X86/windows-seh-EHa-CppCondiTemps.ll
+++ b/llvm/test/CodeGen/X86/windows-seh-EHa-CppCondiTemps.ll
@@ -54,7 +54,7 @@ $"??_C@_0N@GFONDMMJ@in?5B2?5Dtor?5?6?$AA@" = comdat any
 $"??_C@_0N@HCJGCIIK@in?5B3?5Dtor?5?6?$AA@" = comdat any
 
 @"?xxxx@@3HA" = dso_local global i32 0, align 4
-@"?ptr@@3PEAHEA" = dso_local global i32* null, align 8
+@"?ptr@@3PEAHEA" = dso_local global ptr null, align 8
 @"??_C@_0N@FMGAAAAM@in?5B1?5Dtor?5?6?$AA@" = linkonce_odr dso_local unnamed_addr constant [13 x i8] c"in B1 Dtor \0A\00", comdat, align 1
 @"??_C@_0N@GFONDMMJ@in?5B2?5Dtor?5?6?$AA@" = linkonce_odr dso_local unnamed_addr constant [13 x i8] c"in B2 Dtor \0A\00", comdat, align 1
 @"??_C@_0N@HCJGCIIK@in?5B3?5Dtor?5?6?$AA@" = linkonce_odr dso_local unnamed_addr constant [13 x i8] c"in B3 Dtor \0A\00", comdat, align 1
@@ -63,25 +63,25 @@ $"??_C@_0N@HCJGCIIK@in?5B3?5Dtor?5?6?$AA@" = comdat any
 define dso_local i32 @"?foo@@YAHH@Z"(i32 %a) #0 {
 entry:
   %a.addr = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  %0 = load i32, i32* @"?xxxx@@3HA", align 4
-  %1 = load i32, i32* %a.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  %0 = load i32, ptr @"?xxxx@@3HA", align 4
+  %1 = load i32, ptr %a.addr, align 4
   %add = add nsw i32 %0, %1
   ret i32 %add
 }
 
 ; Function Attrs: noinline optnone mustprogress
-define dso_local i32 @"?bar@@YAHHVB1@@VB2@@@Z"(i32 %j, i32 %b1Bar.coerce, i32 %b2Bar.coerce) #1 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local i32 @"?bar@@YAHHVB1@@VB2@@@Z"(i32 %j, i32 %b1Bar.coerce, i32 %b2Bar.coerce) #1 personality ptr @__CxxFrameHandler3 {
 entry:
   %b1Bar = alloca %class.B1, align 4
   %b2Bar = alloca %class.B2, align 4
   %j.addr = alloca i32, align 4
   %ww = alloca i32, align 4
-  %coerce.dive = getelementptr inbounds %class.B1, %class.B1* %b1Bar, i32 0, i32 0
-  store i32 %b1Bar.coerce, i32* %coerce.dive, align 4
-  %coerce.dive1 = getelementptr inbounds %class.B2, %class.B2* %b2Bar, i32 0, i32 0
-  %coerce.dive2 = getelementptr inbounds %class.B1, %class.B1* %coerce.dive1, i32 0, i32 0
-  store i32 %b2Bar.coerce, i32* %coerce.dive2, align 4
+  %coerce.dive = getelementptr inbounds %class.B1, ptr %b1Bar, i32 0, i32 0
+  store i32 %b1Bar.coerce, ptr %coerce.dive, align 4
+  %coerce.dive1 = getelementptr inbounds %class.B2, ptr %b2Bar, i32 0, i32 0
+  %coerce.dive2 = getelementptr inbounds %class.B1, ptr %coerce.dive1, i32 0, i32 0
+  store i32 %b2Bar.coerce, ptr %coerce.dive2, align 4
   invoke void @llvm.seh.scope.begin()
           to label %invoke.cont unwind label %ehcleanup7
 
@@ -90,49 +90,49 @@ invoke.cont:                                      ; preds = %entry
           to label %invoke.cont3 unwind label %ehcleanup
 
 invoke.cont3:                                     ; preds = %invoke.cont
-  store i32 %j, i32* %j.addr, align 4
-  %0 = load i32, i32* %j.addr, align 4
+  store i32 %j, ptr %j.addr, align 4
+  %0 = load i32, ptr %j.addr, align 4
   %cmp = icmp sgt i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %invoke.cont3
-  %data = getelementptr inbounds %class.B1, %class.B1* %b1Bar, i32 0, i32 0
-  %1 = load i32, i32* %data, align 4
-  store i32 %1, i32* %ww, align 4
+  %data = getelementptr inbounds %class.B1, ptr %b1Bar, i32 0, i32 0
+  %1 = load i32, ptr %data, align 4
+  store i32 %1, ptr %ww, align 4
   br label %if.end
 
 if.else:                                          ; preds = %invoke.cont3
-  %2 = bitcast %class.B2* %b2Bar to %class.B1*
-  %data4 = getelementptr inbounds %class.B1, %class.B1* %2, i32 0, i32 0
-  %3 = load i32, i32* %data4, align 4
-  store i32 %3, i32* %ww, align 4
+  %2 = bitcast ptr %b2Bar to ptr
+  %data4 = getelementptr inbounds %class.B1, ptr %2, i32 0, i32 0
+  %3 = load i32, ptr %data4, align 4
+  store i32 %3, ptr %ww, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
-  %4 = load i32, i32* %ww, align 4
-  %5 = load i32*, i32** @"?ptr@@3PEAHEA", align 8
-  %6 = load i32, i32* %5, align 4
+  %4 = load i32, ptr %ww, align 4
+  %5 = load ptr, ptr @"?ptr@@3PEAHEA", align 8
+  %6 = load i32, ptr %5, align 4
   %add = add nsw i32 %4, %6
   invoke void @llvm.seh.scope.end()
           to label %invoke.cont5 unwind label %ehcleanup
 
 invoke.cont5:                                     ; preds = %if.end
-  call void @"??1B1@@QEAA at XZ"(%class.B1* nonnull align 4 dereferenceable(4) %b1Bar) #8
+  call void @"??1B1@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %b1Bar) #8
   invoke void @llvm.seh.scope.end()
           to label %invoke.cont6 unwind label %ehcleanup7
 
 ehcleanup:                                        ; preds = %if.end, %invoke.cont
   %7 = cleanuppad within none []
-  call void @"??1B1@@QEAA at XZ"(%class.B1* nonnull align 4 dereferenceable(4) %b1Bar) #8 [ "funclet"(token %7) ]
+  call void @"??1B1@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %b1Bar) #8 [ "funclet"(token %7) ]
   cleanupret from %7 unwind label %ehcleanup7
 
 invoke.cont6:                                     ; preds = %invoke.cont5
-  call void @"??1B2@@QEAA at XZ"(%class.B2* nonnull align 4 dereferenceable(4) %b2Bar) #8
+  call void @"??1B2@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %b2Bar) #8
   ret i32 %add
 
 ehcleanup7:                                       ; preds = %invoke.cont5, %ehcleanup, %entry
   %8 = cleanuppad within none []
-  call void @"??1B2@@QEAA at XZ"(%class.B2* nonnull align 4 dereferenceable(4) %b2Bar) #8 [ "funclet"(token %8) ]
+  call void @"??1B2@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %b2Bar) #8 [ "funclet"(token %8) ]
   cleanupret from %8 unwind to caller
 }
 
@@ -145,26 +145,26 @@ declare dso_local i32 @__CxxFrameHandler3(...)
 declare dso_local void @llvm.seh.scope.end() #2
 
 ; Function Attrs: noinline nounwind optnone
-define linkonce_odr dso_local void @"??1B1@@QEAA at XZ"(%class.B1* nonnull align 4 dereferenceable(4) %this) unnamed_addr #3 comdat align 2 {
+define linkonce_odr dso_local void @"??1B1@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %this) unnamed_addr #3 comdat align 2 {
 entry:
-  %this.addr = alloca %class.B1*, align 8
-  store %class.B1* %this, %class.B1** %this.addr, align 8
-  %this1 = load %class.B1*, %class.B1** %this.addr, align 8
-  call void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @"??_C at _0N@FMGAAAAM at in?5B1?5Dtor?5?6?$AA@", i64 0, i64 0))
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
+  call void (...) @"?printf@@YAXZZ"(ptr @"??_C at _0N@FMGAAAAM at in?5B1?5Dtor?5?6?$AA@")
   ret void
 }
 
 ; Function Attrs: noinline nounwind optnone
-define linkonce_odr dso_local void @"??1B2@@QEAA at XZ"(%class.B2* nonnull align 4 dereferenceable(4) %this) unnamed_addr #3 comdat align 2 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define linkonce_odr dso_local void @"??1B2@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %this) unnamed_addr #3 comdat align 2 personality ptr @__CxxFrameHandler3 {
 entry:
-  %this.addr = alloca %class.B2*, align 8
-  store %class.B2* %this, %class.B2** %this.addr, align 8
-  %this1 = load %class.B2*, %class.B2** %this.addr, align 8
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
   invoke void @llvm.seh.scope.begin()
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %entry
-  invoke void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @"??_C at _0N@GFONDMMJ at in?5B2?5Dtor?5?6?$AA@", i64 0, i64 0))
+  invoke void (...) @"?printf@@YAXZZ"(ptr @"??_C at _0N@GFONDMMJ at in?5B2?5Dtor?5?6?$AA@")
           to label %invoke.cont2 unwind label %ehcleanup
 
 invoke.cont2:                                     ; preds = %invoke.cont
@@ -172,89 +172,89 @@ invoke.cont2:                                     ; preds = %invoke.cont
           to label %invoke.cont3 unwind label %ehcleanup
 
 invoke.cont3:                                     ; preds = %invoke.cont2
-  %0 = bitcast %class.B2* %this1 to %class.B1*
-  call void @"??1B1@@QEAA at XZ"(%class.B1* nonnull align 4 dereferenceable(4) %0) #8
+  %0 = bitcast ptr %this1 to ptr
+  call void @"??1B1@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %0) #8
   ret void
 
 ehcleanup:                                        ; preds = %invoke.cont2, %invoke.cont, %entry
   %1 = cleanuppad within none []
-  %2 = bitcast %class.B2* %this1 to %class.B1*
-  call void @"??1B1@@QEAA at XZ"(%class.B1* nonnull align 4 dereferenceable(4) %2) #8 [ "funclet"(token %1) ]
+  %2 = bitcast ptr %this1 to ptr
+  call void @"??1B1@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %2) #8 [ "funclet"(token %1) ]
   cleanupret from %1 unwind to caller
 }
 
 ; Function Attrs: noinline optnone mustprogress
-define dso_local void @"?goo@@YA?AVB1@@H at Z"(%class.B1* noalias sret(%class.B1) align 4 %agg.result, i32 %w) #1 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local void @"?goo@@YA?AVB1@@H at Z"(ptr noalias sret(%class.B1) align 4 %agg.result, i32 %w) #1 personality ptr @__CxxFrameHandler3 {
 entry:
-  %result.ptr = alloca i8*, align 8
+  %result.ptr = alloca ptr, align 8
   %w.addr = alloca i32, align 4
   %b2ingoo = alloca %class.B2, align 4
-  %0 = bitcast %class.B1* %agg.result to i8*
-  store i8* %0, i8** %result.ptr, align 8
-  store i32 %w, i32* %w.addr, align 4
-  %call = call %class.B2* @"??0B2@@QEAA@XZ"(%class.B2* nonnull align 4 dereferenceable(4) %b2ingoo)
+  %0 = bitcast ptr %agg.result to ptr
+  store ptr %0, ptr %result.ptr, align 8
+  store i32 %w, ptr %w.addr, align 4
+  %call = call ptr @"??0B2@@QEAA@XZ"(ptr nonnull align 4 dereferenceable(4) %b2ingoo)
   invoke void @llvm.seh.scope.begin()
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %entry
-  %1 = load i32, i32* %w.addr, align 4
-  %2 = bitcast %class.B2* %b2ingoo to %class.B1*
-  %data = getelementptr inbounds %class.B1, %class.B1* %2, i32 0, i32 0
-  %3 = load i32, i32* %data, align 4
+  %1 = load i32, ptr %w.addr, align 4
+  %2 = bitcast ptr %b2ingoo to ptr
+  %data = getelementptr inbounds %class.B1, ptr %2, i32 0, i32 0
+  %3 = load i32, ptr %data, align 4
   %add = add nsw i32 %3, %1
-  store i32 %add, i32* %data, align 4
-  %4 = bitcast %class.B2* %b2ingoo to %class.B1*
-  %5 = bitcast %class.B1* %agg.result to i8*
-  %6 = bitcast %class.B1* %4 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %5, i8* align 4 %6, i64 4, i1 false)
+  store i32 %add, ptr %data, align 4
+  %4 = bitcast ptr %b2ingoo to ptr
+  %5 = bitcast ptr %agg.result to ptr
+  %6 = bitcast ptr %4 to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %5, ptr align 4 %6, i64 4, i1 false)
   invoke void @llvm.seh.scope.end()
           to label %invoke.cont1 unwind label %ehcleanup
 
 invoke.cont1:                                     ; preds = %invoke.cont
-  call void @"??1B2@@QEAA at XZ"(%class.B2* nonnull align 4 dereferenceable(4) %b2ingoo) #8
+  call void @"??1B2@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %b2ingoo) #8
   ret void
 
 ehcleanup:                                        ; preds = %invoke.cont, %entry
   %7 = cleanuppad within none []
-  call void @"??1B2@@QEAA at XZ"(%class.B2* nonnull align 4 dereferenceable(4) %b2ingoo) #8 [ "funclet"(token %7) ]
+  call void @"??1B2@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %b2ingoo) #8 [ "funclet"(token %7) ]
   cleanupret from %7 unwind to caller
 }
 
 ; Function Attrs: noinline optnone
-define linkonce_odr dso_local %class.B2* @"??0B2@@QEAA@XZ"(%class.B2* nonnull returned align 4 dereferenceable(4) %this) unnamed_addr #4 comdat align 2 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define linkonce_odr dso_local ptr @"??0B2@@QEAA@XZ"(ptr nonnull returned align 4 dereferenceable(4) %this) unnamed_addr #4 comdat align 2 personality ptr @__CxxFrameHandler3 {
 entry:
-  %this.addr = alloca %class.B2*, align 8
-  store %class.B2* %this, %class.B2** %this.addr, align 8
-  %this1 = load %class.B2*, %class.B2** %this.addr, align 8
-  %0 = bitcast %class.B2* %this1 to %class.B1*
-  %call = call %class.B1* @"??0B1@@QEAA@XZ"(%class.B1* nonnull align 4 dereferenceable(4) %0)
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
+  %0 = bitcast ptr %this1 to ptr
+  %call = call ptr @"??0B1@@QEAA@XZ"(ptr nonnull align 4 dereferenceable(4) %0)
   invoke void @llvm.seh.scope.begin()
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %entry
-  %1 = bitcast %class.B2* %this1 to %class.B1*
-  %data = getelementptr inbounds %class.B1, %class.B1* %1, i32 0, i32 0
-  %2 = load i32, i32* %data, align 4
+  %1 = bitcast ptr %this1 to ptr
+  %data = getelementptr inbounds %class.B1, ptr %1, i32 0, i32 0
+  %2 = load i32, ptr %data, align 4
   %add = add nsw i32 %2, 222
   %call2 = call i32 @"?foo@@YAHH at Z"(i32 %add)
   invoke void @llvm.seh.scope.end()
           to label %invoke.cont3 unwind label %ehcleanup
 
 invoke.cont3:                                     ; preds = %invoke.cont
-  ret %class.B2* %this1
+  ret ptr %this1
 
 ehcleanup:                                        ; preds = %invoke.cont, %entry
   %3 = cleanuppad within none []
-  %4 = bitcast %class.B2* %this1 to %class.B1*
-  call void @"??1B1@@QEAA at XZ"(%class.B1* nonnull align 4 dereferenceable(4) %4) #8 [ "funclet"(token %3) ]
+  %4 = bitcast ptr %this1 to ptr
+  call void @"??1B1@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %4) #8 [ "funclet"(token %3) ]
   cleanupret from %3 unwind to caller
 }
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) #5
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #5
 
 ; Function Attrs: noinline norecurse optnone mustprogress
-define dso_local i32 @main() #6 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local i32 @main() #6 personality ptr @__CxxFrameHandler3 {
 entry:
   %retval = alloca i32, align 4
   %b3inmain = alloca %class.B3, align 4
@@ -267,20 +267,20 @@ entry:
   %agg.tmp = alloca %class.B2, align 4
   %agg.tmp28 = alloca %class.B1, align 4
   %b1fromgoo = alloca %class.B1, align 4
-  store i32 0, i32* %retval, align 4
-  %call = call %class.B3* @"??0B3@@QEAA@XZ"(%class.B3* nonnull align 4 dereferenceable(4) %b3inmain)
+  store i32 0, ptr %retval, align 4
+  %call = call ptr @"??0B3@@QEAA@XZ"(ptr nonnull align 4 dereferenceable(4) %b3inmain)
   invoke void @llvm.seh.scope.begin()
           to label %invoke.cont unwind label %ehcleanup50
 
 invoke.cont:                                      ; preds = %entry
-  %0 = load i32, i32* @"?xxxx@@3HA", align 4
+  %0 = load i32, ptr @"?xxxx@@3HA", align 4
   %cmp = icmp sgt i32 %0, 1
-  store i1 false, i1* %cleanup.cond, align 1
-  store i1 false, i1* %cleanup.cond9, align 1
+  store i1 false, ptr %cleanup.cond, align 1
+  store i1 false, ptr %cleanup.cond9, align 1
   br i1 %cmp, label %cond.true, label %cond.false
 
 cond.true:                                        ; preds = %invoke.cont
-  %call2 = invoke %class.B2* @"??0B2@@QEAA@XZ"(%class.B2* nonnull align 4 dereferenceable(4) %ref.tmp)
+  %call2 = invoke ptr @"??0B2@@QEAA@XZ"(ptr nonnull align 4 dereferenceable(4) %ref.tmp)
           to label %invoke.cont1 unwind label %ehcleanup50
 
 invoke.cont1:                                     ; preds = %cond.true
@@ -288,16 +288,16 @@ invoke.cont1:                                     ; preds = %cond.true
           to label %invoke.cont3 unwind label %ehcleanup21
 
 invoke.cont3:                                     ; preds = %invoke.cont1
-  store i1 true, i1* %cleanup.cond, align 1
-  %1 = bitcast %class.B2* %ref.tmp to %class.B1*
-  %data = getelementptr inbounds %class.B1, %class.B1* %1, i32 0, i32 0
-  %2 = load i32, i32* %data, align 4
+  store i1 true, ptr %cleanup.cond, align 1
+  %1 = bitcast ptr %ref.tmp to ptr
+  %data = getelementptr inbounds %class.B1, ptr %1, i32 0, i32 0
+  %2 = load i32, ptr %data, align 4
   %call4 = call i32 @"?foo@@YAHH@Z"(i32 99)
   %add = add nsw i32 %2, %call4
   br label %cond.end
 
 cond.false:                                       ; preds = %invoke.cont
-  %call7 = invoke %class.B3* @"??0B3@@QEAA@XZ"(%class.B3* nonnull align 4 dereferenceable(4) %ref.tmp5)
+  %call7 = invoke ptr @"??0B3@@QEAA@XZ"(ptr nonnull align 4 dereferenceable(4) %ref.tmp5)
           to label %invoke.cont6 unwind label %ehcleanup21
 
 invoke.cont6:                                     ; preds = %cond.false
@@ -305,10 +305,10 @@ invoke.cont6:                                     ; preds = %cond.false
           to label %invoke.cont8 unwind label %ehcleanup
 
 invoke.cont8:                                     ; preds = %invoke.cont6
-  store i1 true, i1* %cleanup.cond9, align 1
-  %3 = bitcast %class.B3* %ref.tmp5 to %class.B1*
-  %data10 = getelementptr inbounds %class.B1, %class.B1* %3, i32 0, i32 0
-  %4 = load i32, i32* %data10, align 4
+  store i1 true, ptr %cleanup.cond9, align 1
+  %3 = bitcast ptr %ref.tmp5 to ptr
+  %data10 = getelementptr inbounds %class.B1, ptr %3, i32 0, i32 0
+  %4 = load i32, ptr %data10, align 4
   %call11 = call i32 @"?foo@@YAHH@Z"(i32 88)
   %add12 = add nsw i32 %4, %call11
   br label %cond.end
@@ -319,11 +319,11 @@ cond.end:                                         ; preds = %invoke.cont8, %invo
           to label %invoke.cont13 unwind label %ehcleanup
 
 invoke.cont13:                                    ; preds = %cond.end
-  %cleanup.is_active = load i1, i1* %cleanup.cond9, align 1
+  %cleanup.is_active = load i1, ptr %cleanup.cond9, align 1
   br i1 %cleanup.is_active, label %cleanup.action, label %cleanup.done
 
 cleanup.action:                                   ; preds = %invoke.cont13
-  call void @"??1B3@@QEAA at XZ"(%class.B3* nonnull align 4 dereferenceable(4) %ref.tmp5) #8
+  call void @"??1B3@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %ref.tmp5) #8
   br label %cleanup.done
 
 cleanup.done:                                     ; preds = %cleanup.action, %invoke.cont13
@@ -331,16 +331,16 @@ cleanup.done:                                     ; preds = %cleanup.action, %in
           to label %invoke.cont17 unwind label %ehcleanup21
 
 invoke.cont17:                                    ; preds = %cleanup.done
-  %cleanup.is_active18 = load i1, i1* %cleanup.cond, align 1
+  %cleanup.is_active18 = load i1, ptr %cleanup.cond, align 1
   br i1 %cleanup.is_active18, label %cleanup.action19, label %cleanup.done20
 
 cleanup.action19:                                 ; preds = %invoke.cont17
-  call void @"??1B2@@QEAA at XZ"(%class.B2* nonnull align 4 dereferenceable(4) %ref.tmp) #8
+  call void @"??1B2@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %ref.tmp) #8
   br label %cleanup.done20
 
 cleanup.done20:                                   ; preds = %cleanup.action19, %invoke.cont17
-  store i32 %cond, i32* %m, align 4
-  %call26 = invoke %class.B2* @"??0B2@@QEAA@XZ"(%class.B2* nonnull align 4 dereferenceable(4) %agg.tmp)
+  store i32 %cond, ptr %m, align 4
+  %call26 = invoke ptr @"??0B2@@QEAA@XZ"(ptr nonnull align 4 dereferenceable(4) %agg.tmp)
           to label %invoke.cont25 unwind label %ehcleanup50
 
 invoke.cont25:                                    ; preds = %cleanup.done20
@@ -348,7 +348,7 @@ invoke.cont25:                                    ; preds = %cleanup.done20
           to label %invoke.cont27 unwind label %ehcleanup38
 
 invoke.cont27:                                    ; preds = %invoke.cont25
-  %call30 = invoke %class.B1* @"??0B1@@QEAA@XZ"(%class.B1* nonnull align 4 dereferenceable(4) %agg.tmp28)
+  %call30 = invoke ptr @"??0B1@@QEAA@XZ"(ptr nonnull align 4 dereferenceable(4) %agg.tmp28)
           to label %invoke.cont29 unwind label %ehcleanup38
 
 invoke.cont29:                                    ; preds = %invoke.cont27
@@ -357,11 +357,11 @@ invoke.cont29:                                    ; preds = %invoke.cont27
 
 invoke.cont31:                                    ; preds = %invoke.cont29
   %call32 = call i32 @"?foo@@YAHH@Z"(i32 0)
-  %coerce.dive = getelementptr inbounds %class.B1, %class.B1* %agg.tmp28, i32 0, i32 0
-  %5 = load i32, i32* %coerce.dive, align 4
-  %coerce.dive33 = getelementptr inbounds %class.B2, %class.B2* %agg.tmp, i32 0, i32 0
-  %coerce.dive34 = getelementptr inbounds %class.B1, %class.B1* %coerce.dive33, i32 0, i32 0
-  %6 = load i32, i32* %coerce.dive34, align 4
+  %coerce.dive = getelementptr inbounds %class.B1, ptr %agg.tmp28, i32 0, i32 0
+  %5 = load i32, ptr %coerce.dive, align 4
+  %coerce.dive33 = getelementptr inbounds %class.B2, ptr %agg.tmp, i32 0, i32 0
+  %coerce.dive34 = getelementptr inbounds %class.B1, ptr %coerce.dive33, i32 0, i32 0
+  %6 = load i32, ptr %coerce.dive34, align 4
   invoke void @llvm.seh.scope.end()
           to label %invoke.cont35 unwind label %ehcleanup36
 
@@ -374,9 +374,9 @@ invoke.cont37:                                    ; preds = %invoke.cont35
           to label %invoke.cont39 unwind label %ehcleanup50
 
 invoke.cont39:                                    ; preds = %invoke.cont37
-  store i32 %call40, i32* %i, align 4
-  %7 = load i32, i32* %i, align 4
-  invoke void @"?goo@@YA?AVB1@@H at Z"(%class.B1* sret(%class.B1) align 4 %b1fromgoo, i32 %7)
+  store i32 %call40, ptr %i, align 4
+  %7 = load i32, ptr %i, align 4
+  invoke void @"?goo@@YA?AVB1@@H at Z"(ptr sret(%class.B1) align 4 %b1fromgoo, i32 %7)
           to label %invoke.cont41 unwind label %ehcleanup50
 
 invoke.cont41:                                    ; preds = %invoke.cont39
@@ -384,25 +384,25 @@ invoke.cont41:                                    ; preds = %invoke.cont39
           to label %invoke.cont42 unwind label %ehcleanup48
 
 invoke.cont42:                                    ; preds = %invoke.cont41
-  %8 = load i32, i32* %m, align 4
-  %data43 = getelementptr inbounds %class.B1, %class.B1* %b1fromgoo, i32 0, i32 0
-  %9 = load i32, i32* %data43, align 4
+  %8 = load i32, ptr %m, align 4
+  %data43 = getelementptr inbounds %class.B1, ptr %b1fromgoo, i32 0, i32 0
+  %9 = load i32, ptr %data43, align 4
   %add44 = add nsw i32 %8, %9
-  %10 = bitcast %class.B3* %b3inmain to %class.B1*
-  %data45 = getelementptr inbounds %class.B1, %class.B1* %10, i32 0, i32 0
-  %11 = load i32, i32* %data45, align 4
+  %10 = bitcast ptr %b3inmain to ptr
+  %data45 = getelementptr inbounds %class.B1, ptr %10, i32 0, i32 0
+  %11 = load i32, ptr %data45, align 4
   %add46 = add nsw i32 %add44, %11
-  store i32 %add46, i32* %retval, align 4
+  store i32 %add46, ptr %retval, align 4
   invoke void @llvm.seh.scope.end()
           to label %invoke.cont47 unwind label %ehcleanup48
 
 ehcleanup:                                        ; preds = %cond.end, %invoke.cont6
   %12 = cleanuppad within none []
-  %cleanup.is_active14 = load i1, i1* %cleanup.cond9, align 1
+  %cleanup.is_active14 = load i1, ptr %cleanup.cond9, align 1
   br i1 %cleanup.is_active14, label %cleanup.action15, label %cleanup.done16
 
 cleanup.action15:                                 ; preds = %ehcleanup
-  call void @"??1B3@@QEAA at XZ"(%class.B3* nonnull align 4 dereferenceable(4) %ref.tmp5) #8 [ "funclet"(token %12) ]
+  call void @"??1B3@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %ref.tmp5) #8 [ "funclet"(token %12) ]
   br label %cleanup.done16
 
 cleanup.done16:                                   ; preds = %cleanup.action15, %ehcleanup
@@ -410,11 +410,11 @@ cleanup.done16:                                   ; preds = %cleanup.action15, %
 
 ehcleanup21:                                      ; preds = %cleanup.done, %cleanup.done16, %cond.false, %invoke.cont1
   %13 = cleanuppad within none []
-  %cleanup.is_active22 = load i1, i1* %cleanup.cond, align 1
+  %cleanup.is_active22 = load i1, ptr %cleanup.cond, align 1
   br i1 %cleanup.is_active22, label %cleanup.action23, label %cleanup.done24
 
 cleanup.action23:                                 ; preds = %ehcleanup21
-  call void @"??1B2@@QEAA at XZ"(%class.B2* nonnull align 4 dereferenceable(4) %ref.tmp) #8 [ "funclet"(token %13) ]
+  call void @"??1B2@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %ref.tmp) #8 [ "funclet"(token %13) ]
   br label %cleanup.done24
 
 cleanup.done24:                                   ; preds = %cleanup.action23, %ehcleanup21
@@ -422,76 +422,76 @@ cleanup.done24:                                   ; preds = %cleanup.action23, %
 
 ehcleanup36:                                      ; preds = %invoke.cont31, %invoke.cont29
   %14 = cleanuppad within none []
-  call void @"??1B1@@QEAA at XZ"(%class.B1* nonnull align 4 dereferenceable(4) %agg.tmp28) #8 [ "funclet"(token %14) ]
+  call void @"??1B1@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %agg.tmp28) #8 [ "funclet"(token %14) ]
   cleanupret from %14 unwind label %ehcleanup38
 
 ehcleanup38:                                      ; preds = %invoke.cont35, %ehcleanup36, %invoke.cont27, %invoke.cont25
   %15 = cleanuppad within none []
-  call void @"??1B2@@QEAA at XZ"(%class.B2* nonnull align 4 dereferenceable(4) %agg.tmp) #8 [ "funclet"(token %15) ]
+  call void @"??1B2@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %agg.tmp) #8 [ "funclet"(token %15) ]
   cleanupret from %15 unwind label %ehcleanup50
 
 invoke.cont47:                                    ; preds = %invoke.cont42
-  call void @"??1B1@@QEAA at XZ"(%class.B1* nonnull align 4 dereferenceable(4) %b1fromgoo) #8
+  call void @"??1B1@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %b1fromgoo) #8
   invoke void @llvm.seh.scope.end()
           to label %invoke.cont49 unwind label %ehcleanup50
 
 ehcleanup48:                                      ; preds = %invoke.cont42, %invoke.cont41
   %16 = cleanuppad within none []
-  call void @"??1B1@@QEAA at XZ"(%class.B1* nonnull align 4 dereferenceable(4) %b1fromgoo) #8 [ "funclet"(token %16) ]
+  call void @"??1B1@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %b1fromgoo) #8 [ "funclet"(token %16) ]
   cleanupret from %16 unwind label %ehcleanup50
 
 invoke.cont49:                                    ; preds = %invoke.cont47
-  call void @"??1B3@@QEAA at XZ"(%class.B3* nonnull align 4 dereferenceable(4) %b3inmain) #8
-  %17 = load i32, i32* %retval, align 4
+  call void @"??1B3@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %b3inmain) #8
+  %17 = load i32, ptr %retval, align 4
   ret i32 %17
 
 ehcleanup50:                                      ; preds = %invoke.cont47, %ehcleanup48, %invoke.cont39, %invoke.cont37, %ehcleanup38, %cleanup.done20, %cleanup.done24, %cond.true, %entry
   %18 = cleanuppad within none []
-  call void @"??1B3@@QEAA at XZ"(%class.B3* nonnull align 4 dereferenceable(4) %b3inmain) #8 [ "funclet"(token %18) ]
+  call void @"??1B3@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %b3inmain) #8 [ "funclet"(token %18) ]
   cleanupret from %18 unwind to caller
 }
 
 ; Function Attrs: noinline optnone
-define linkonce_odr dso_local %class.B3* @"??0B3@@QEAA@XZ"(%class.B3* nonnull returned align 4 dereferenceable(4) %this) unnamed_addr #4 comdat align 2 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define linkonce_odr dso_local ptr @"??0B3@@QEAA@XZ"(ptr nonnull returned align 4 dereferenceable(4) %this) unnamed_addr #4 comdat align 2 personality ptr @__CxxFrameHandler3 {
 entry:
-  %this.addr = alloca %class.B3*, align 8
-  store %class.B3* %this, %class.B3** %this.addr, align 8
-  %this1 = load %class.B3*, %class.B3** %this.addr, align 8
-  %0 = bitcast %class.B3* %this1 to %class.B2*
-  %call = call %class.B2* @"??0B2@@QEAA@XZ"(%class.B2* nonnull align 4 dereferenceable(4) %0)
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
+  %0 = bitcast ptr %this1 to ptr
+  %call = call ptr @"??0B2@@QEAA@XZ"(ptr nonnull align 4 dereferenceable(4) %0)
   invoke void @llvm.seh.scope.begin()
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %entry
-  %1 = bitcast %class.B3* %this1 to %class.B1*
-  %data = getelementptr inbounds %class.B1, %class.B1* %1, i32 0, i32 0
-  %2 = load i32, i32* %data, align 4
+  %1 = bitcast ptr %this1 to ptr
+  %data = getelementptr inbounds %class.B1, ptr %1, i32 0, i32 0
+  %2 = load i32, ptr %data, align 4
   %add = add nsw i32 %2, 333
   %call2 = call i32 @"?foo@@YAHH@Z"(i32 %add)
   invoke void @llvm.seh.scope.end()
           to label %invoke.cont3 unwind label %ehcleanup
 
 invoke.cont3:                                     ; preds = %invoke.cont
-  ret %class.B3* %this1
+  ret ptr %this1
 
 ehcleanup:                                        ; preds = %invoke.cont, %entry
   %3 = cleanuppad within none []
-  %4 = bitcast %class.B3* %this1 to %class.B2*
-  call void @"??1B2@@QEAA at XZ"(%class.B2* nonnull align 4 dereferenceable(4) %4) #8 [ "funclet"(token %3) ]
+  %4 = bitcast ptr %this1 to ptr
+  call void @"??1B2@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %4) #8 [ "funclet"(token %3) ]
   cleanupret from %3 unwind to caller
 }
 
 ; Function Attrs: noinline nounwind optnone
-define linkonce_odr dso_local void @"??1B3@@QEAA at XZ"(%class.B3* nonnull align 4 dereferenceable(4) %this) unnamed_addr #3 comdat align 2 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define linkonce_odr dso_local void @"??1B3@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %this) unnamed_addr #3 comdat align 2 personality ptr @__CxxFrameHandler3 {
 entry:
-  %this.addr = alloca %class.B3*, align 8
-  store %class.B3* %this, %class.B3** %this.addr, align 8
-  %this1 = load %class.B3*, %class.B3** %this.addr, align 8
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
   invoke void @llvm.seh.scope.begin()
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %entry
-  invoke void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @"??_C at _0N@HCJGCIIK at in?5B3?5Dtor?5?6?$AA@", i64 0, i64 0))
+  invoke void (...) @"?printf@@YAXZZ"(ptr @"??_C at _0N@HCJGCIIK at in?5B3?5Dtor?5?6?$AA@")
           to label %invoke.cont2 unwind label %ehcleanup
 
 invoke.cont2:                                     ; preds = %invoke.cont
@@ -499,30 +499,30 @@ invoke.cont2:                                     ; preds = %invoke.cont
           to label %invoke.cont3 unwind label %ehcleanup
 
 invoke.cont3:                                     ; preds = %invoke.cont2
-  %0 = bitcast %class.B3* %this1 to %class.B2*
-  call void @"??1B2@@QEAA at XZ"(%class.B2* nonnull align 4 dereferenceable(4) %0) #8
+  %0 = bitcast ptr %this1 to ptr
+  call void @"??1B2@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %0) #8
   ret void
 
 ehcleanup:                                        ; preds = %invoke.cont2, %invoke.cont, %entry
   %1 = cleanuppad within none []
-  %2 = bitcast %class.B3* %this1 to %class.B2*
-  call void @"??1B2@@QEAA at XZ"(%class.B2* nonnull align 4 dereferenceable(4) %2) #8 [ "funclet"(token %1) ]
+  %2 = bitcast ptr %this1 to ptr
+  call void @"??1B2@@QEAA at XZ"(ptr nonnull align 4 dereferenceable(4) %2) #8 [ "funclet"(token %1) ]
   cleanupret from %1 unwind to caller
 }
 
 ; Function Attrs: noinline nounwind optnone
-define linkonce_odr dso_local %class.B1* @"??0B1@@QEAA@XZ"(%class.B1* nonnull returned align 4 dereferenceable(4) %this) unnamed_addr #3 comdat align 2 {
+define linkonce_odr dso_local ptr @"??0B1@@QEAA@XZ"(ptr nonnull returned align 4 dereferenceable(4) %this) unnamed_addr #3 comdat align 2 {
 entry:
-  %this.addr = alloca %class.B1*, align 8
-  store %class.B1* %this, %class.B1** %this.addr, align 8
-  %this1 = load %class.B1*, %class.B1** %this.addr, align 8
-  %data = getelementptr inbounds %class.B1, %class.B1* %this1, i32 0, i32 0
-  store i32 90, i32* %data, align 4
-  %data2 = getelementptr inbounds %class.B1, %class.B1* %this1, i32 0, i32 0
-  %0 = load i32, i32* %data2, align 4
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
+  %data = getelementptr inbounds %class.B1, ptr %this1, i32 0, i32 0
+  store i32 90, ptr %data, align 4
+  %data2 = getelementptr inbounds %class.B1, ptr %this1, i32 0, i32 0
+  %0 = load i32, ptr %data2, align 4
   %add = add nsw i32 %0, 111
   %call = call i32 @"?foo@@YAHH@Z"(i32 %add)
-  ret %class.B1* %this1
+  ret ptr %this1
 }
 
 declare dso_local void @"?printf@@YAXZZ"(...) #7

diff  --git a/llvm/test/CodeGen/X86/windows-seh-EHa-CppDtors01.ll b/llvm/test/CodeGen/X86/windows-seh-EHa-CppDtors01.ll
index 8981cebcf4990..96b31316f39a3 100644
--- a/llvm/test/CodeGen/X86/windows-seh-EHa-CppDtors01.ll
+++ b/llvm/test/CodeGen/X86/windows-seh-EHa-CppDtors01.ll
@@ -52,23 +52,23 @@ $"??_C@_0N@HMNCGOCN@?5in?5A?5dtor?5?6?$AA@" = comdat any
 @"??_C@_0N@HMNCGOCN@?5in?5A?5dtor?5?6?$AA@" = linkonce_odr dso_local unnamed_addr constant [13 x i8] c" in A dtor \0A\00", comdat, align 1
 
 ; Function Attrs: noinline optnone
-define dso_local void @"?crash@@YAXH at Z"(i32 %i) #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local void @"?crash@@YAXH at Z"(i32 %i) #0 personality ptr @__CxxFrameHandler3 {
 entry:
   %i.addr = alloca i32, align 4
   %ObjA = alloca %struct.A, align 1
   %ObjB = alloca %struct.B, align 1
   %ObjC = alloca %struct.C, align 1
-  store i32 %i, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
   invoke void @llvm.seh.scope.begin()
           to label %invoke.cont unwind label %ehcleanup13
 
 invoke.cont:                                      ; preds = %entry
-  %0 = load i32, i32* %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %invoke.cont
-  store volatile i32 0, i32* inttoptr (i64 17 to i32*), align 4
+  store volatile i32 0, ptr inttoptr (i64 17 to ptr), align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %invoke.cont
@@ -76,12 +76,12 @@ if.end:                                           ; preds = %if.then, %invoke.co
           to label %invoke.cont1 unwind label %ehcleanup11
 
 invoke.cont1:                                     ; preds = %if.end
-  %1 = load i32, i32* %i.addr, align 4
+  %1 = load i32, ptr %i.addr, align 4
   %cmp2 = icmp eq i32 %1, 1
   br i1 %cmp2, label %if.then3, label %if.end4
 
 if.then3:                                         ; preds = %invoke.cont1
-  store volatile i32 0, i32* inttoptr (i64 17 to i32*), align 4
+  store volatile i32 0, ptr inttoptr (i64 17 to ptr), align 4
   br label %if.end4
 
 if.end4:                                          ; preds = %if.then3, %invoke.cont1
@@ -89,12 +89,12 @@ if.end4:                                          ; preds = %if.then3, %invoke.c
           to label %invoke.cont5 unwind label %ehcleanup
 
 invoke.cont5:                                     ; preds = %if.end4
-  %2 = load i32, i32* %i.addr, align 4
+  %2 = load i32, ptr %i.addr, align 4
   %cmp6 = icmp eq i32 %2, 2
   br i1 %cmp6, label %if.then7, label %if.end8
 
 if.then7:                                         ; preds = %invoke.cont5
-  store volatile i32 0, i32* inttoptr (i64 17 to i32*), align 4
+  store volatile i32 0, ptr inttoptr (i64 17 to ptr), align 4
   br label %if.end8
 
 if.end8:                                          ; preds = %if.then7, %invoke.cont5
@@ -102,32 +102,32 @@ if.end8:                                          ; preds = %if.then7, %invoke.c
           to label %invoke.cont9 unwind label %ehcleanup
 
 invoke.cont9:                                     ; preds = %if.end8
-  call void @"??1C@?1??crash@@YAXH at Z@QEAA at XZ"(%struct.C* %ObjC) #6
+  call void @"??1C@?1??crash@@YAXH at Z@QEAA at XZ"(ptr %ObjC) #6
   invoke void @llvm.seh.scope.end()
           to label %invoke.cont10 unwind label %ehcleanup11
 
 invoke.cont10:                                    ; preds = %invoke.cont9
-  call void @"??1B@?1??crash@@YAXH at Z@QEAA at XZ"(%struct.B* %ObjB) #6
+  call void @"??1B@?1??crash@@YAXH at Z@QEAA at XZ"(ptr %ObjB) #6
   invoke void @llvm.seh.scope.end()
           to label %invoke.cont12 unwind label %ehcleanup13
 
 invoke.cont12:                                    ; preds = %invoke.cont10
-  call void @"??1A@?1??crash@@YAXH at Z@QEAA at XZ"(%struct.A* %ObjA) #6
+  call void @"??1A@?1??crash@@YAXH at Z@QEAA at XZ"(ptr %ObjA) #6
   ret void
 
 ehcleanup:                                        ; preds = %if.end8, %if.end4
   %3 = cleanuppad within none []
-  call void @"??1C@?1??crash@@YAXH at Z@QEAA at XZ"(%struct.C* %ObjC) #6 [ "funclet"(token %3) ]
+  call void @"??1C@?1??crash@@YAXH at Z@QEAA at XZ"(ptr %ObjC) #6 [ "funclet"(token %3) ]
   cleanupret from %3 unwind label %ehcleanup11
 
 ehcleanup11:                                      ; preds = %invoke.cont9, %ehcleanup, %if.end
   %4 = cleanuppad within none []
-  call void @"??1B@?1??crash@@YAXH at Z@QEAA at XZ"(%struct.B* %ObjB) #6 [ "funclet"(token %4) ]
+  call void @"??1B@?1??crash@@YAXH at Z@QEAA at XZ"(ptr %ObjB) #6 [ "funclet"(token %4) ]
   cleanupret from %4 unwind label %ehcleanup13
 
 ehcleanup13:                                      ; preds = %invoke.cont10, %ehcleanup11, %entry
   %5 = cleanuppad within none []
-  call void @"??1A@?1??crash@@YAXH at Z@QEAA at XZ"(%struct.A* %ObjA) #6 [ "funclet"(token %5) ]
+  call void @"??1A@?1??crash@@YAXH at Z@QEAA at XZ"(ptr %ObjA) #6 [ "funclet"(token %5) ]
   cleanupret from %5 unwind to caller
 }
 
@@ -140,47 +140,47 @@ declare dso_local i32 @__CxxFrameHandler3(...)
 declare dso_local void @llvm.seh.scope.end() #1
 
 ; Function Attrs: noinline nounwind optnone
-define internal void @"??1C@?1??crash@@YAXH at Z@QEAA at XZ"(%struct.C* %this) unnamed_addr #2 align 2 {
+define internal void @"??1C@?1??crash@@YAXH at Z@QEAA at XZ"(ptr %this) unnamed_addr #2 align 2 {
 entry:
-  %this.addr = alloca %struct.C*, align 8
-  store %struct.C* %this, %struct.C** %this.addr, align 8
-  %this1 = load %struct.C*, %struct.C** %this.addr, align 8
-  call void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @"??_C at _0N@FCCEEGKL@?5in?5C?5dtor?5?6?$AA@", i64 0, i64 0))
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
+  call void (...) @"?printf@@YAXZZ"(ptr @"??_C at _0N@FCCEEGKL@?5in?5C?5dtor?5?6?$AA@")
   ret void
 }
 
 ; Function Attrs: noinline nounwind optnone
-define internal void @"??1B@?1??crash@@YAXH at Z@QEAA at XZ"(%struct.B* %this) unnamed_addr #2 align 2 {
+define internal void @"??1B@?1??crash@@YAXH at Z@QEAA at XZ"(ptr %this) unnamed_addr #2 align 2 {
 entry:
-  %this.addr = alloca %struct.B*, align 8
-  store %struct.B* %this, %struct.B** %this.addr, align 8
-  %this1 = load %struct.B*, %struct.B** %this.addr, align 8
-  call void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @"??_C@_0N@EFFPFCOI@?5in?5B?5dtor?5?6?$AA@", i64 0, i64 0))
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
+  call void (...) @"?printf@@YAXZZ"(ptr @"??_C@_0N@EFFPFCOI@?5in?5B?5dtor?5?6?$AA@")
   ret void
 }
 
 ; Function Attrs: noinline nounwind optnone
-define internal void @"??1A@?1??crash@@YAXH at Z@QEAA at XZ"(%struct.A* %this) unnamed_addr #2 align 2 {
+define internal void @"??1A@?1??crash@@YAXH at Z@QEAA at XZ"(ptr %this) unnamed_addr #2 align 2 {
 entry:
-  %this.addr = alloca %struct.A*, align 8
-  store %struct.A* %this, %struct.A** %this.addr, align 8
-  %this1 = load %struct.A*, %struct.A** %this.addr, align 8
-  call void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @"??_C@_0N@HMNCGOCN@?5in?5A?5dtor?5?6?$AA@", i64 0, i64 0))
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
+  call void (...) @"?printf@@YAXZZ"(ptr @"??_C@_0N@HMNCGOCN@?5in?5A?5dtor?5?6?$AA@")
   ret void
 }
 
 ; Function Attrs: noinline norecurse optnone
-define dso_local i32 @main() #3 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+define dso_local i32 @main() #3 personality ptr @__C_specific_handler {
 entry:
   %retval = alloca i32, align 4
   %i = alloca i32, align 4
   %__exception_code = alloca i32, align 4
-  store i32 0, i32* %retval, align 4
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %retval, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %0 = load i32, i32* %i, align 4
+  %0 = load i32, ptr %i, align 4
   %cmp = icmp slt i32 %0, 3
   br i1 %cmp, label %for.body, label %for.end
 
@@ -189,7 +189,7 @@ for.body:                                         ; preds = %for.cond
           to label %invoke.cont unwind label %catch.dispatch
 
 invoke.cont:                                      ; preds = %for.body
-  %1 = load volatile i32, i32* %i, align 4
+  %1 = load volatile i32, ptr %i, align 4
   invoke void @"?crash@@YAXH at Z"(i32 %1) #7
           to label %invoke.cont1 unwind label %catch.dispatch
 
@@ -201,23 +201,23 @@ catch.dispatch:                                   ; preds = %invoke.cont1, %invo
   %2 = catchswitch within none [label %__except] unwind to caller
 
 __except:                                         ; preds = %catch.dispatch
-  %3 = catchpad within %2 [i8* null]
+  %3 = catchpad within %2 [ptr null]
   catchret from %3 to label %__except3
 
 __except3:                                        ; preds = %__except
   %4 = call i32 @llvm.eh.exceptioncode(token %3)
-  store i32 %4, i32* %__exception_code, align 4
-  %5 = load i32, i32* %i, align 4
-  call void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @"??_C@_0CM@KAOHJHDK@?5Test?5CPP?5unwind?3?5in?5catch?5handl@", i64 0, i64 0), i32 %5)
+  store i32 %4, ptr %__exception_code, align 4
+  %5 = load i32, ptr %i, align 4
+  call void (...) @"?printf@@YAXZZ"(ptr @"??_C@_0CM@KAOHJHDK@?5Test?5CPP?5unwind?3?5in?5catch?5handl@", i32 %5)
   br label %__try.cont
 
 __try.cont:                                       ; preds = %__except3, %invoke.cont2
   br label %for.inc
 
 for.inc:                                          ; preds = %__try.cont
-  %6 = load i32, i32* %i, align 4
+  %6 = load i32, ptr %i, align 4
   %inc = add nsw i32 %6, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond
 
 invoke.cont2:                                     ; preds = %invoke.cont1

diff --git a/llvm/test/CodeGen/X86/windows-seh-EHa-TryInFinally.ll b/llvm/test/CodeGen/X86/windows-seh-EHa-TryInFinally.ll
index 4fed375649247..340a9afe4a3d5 100644
--- a/llvm/test/CodeGen/X86/windows-seh-EHa-TryInFinally.ll
+++ b/llvm/test/CodeGen/X86/windows-seh-EHa-TryInFinally.ll
@@ -33,24 +33,24 @@ $"??_C@_0CG@ENDJHCGA@?5?9?9?9?5In?5outer?5except?5handler?5i?5?$DN@" = comdat an
 @"??_C@_0CG@ENDJHCGA@?5?9?9?9?5In?5outer?5except?5handler?5i?5?$DN@" = linkonce_odr dso_local unnamed_addr constant [38 x i8] c" --- In outer except handler i = %d \0A\00", comdat, align 1
 
 ; Function Attrs: noinline norecurse optnone
-define dso_local i32 @main() #0 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+define dso_local i32 @main() #0 personality ptr @__C_specific_handler {
 entry:
   %retval = alloca i32, align 4
   %i = alloca i32, align 4
   %__exception_code = alloca i32, align 4
-  call void (...) @llvm.localescape(i32* %i)
-  store i32 0, i32* %retval, align 4
-  store i32 0, i32* %i, align 4
+  call void (...) @llvm.localescape(ptr %i)
+  store i32 0, ptr %retval, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %0 = load i32, i32* %i, align 4
+  %0 = load i32, ptr %i, align 4
   %cmp = icmp slt i32 %0, 3
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %1 = load i32, i32* %i, align 4
-  call void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([40 x i8], [40 x i8]* @"??_C@_0CI@MDFPIOJJ@?5?9?9?9?5Test?5_Try?5in?5_finally?5?9?9?9?5i@", i64 0, i64 0), i32 %1)
+  %1 = load i32, ptr %i, align 4
+  call void (...) @"?printf@@YAXZZ"(ptr @"??_C@_0CI@MDFPIOJJ@?5?9?9?9?5Test?5_Try?5in?5_finally?5?9?9?9?5i@", i32 %1)
   invoke void @llvm.seh.try.begin()
           to label %invoke.cont unwind label %catch.dispatch
 
@@ -59,17 +59,17 @@ invoke.cont:                                      ; preds = %for.body
           to label %invoke.cont1 unwind label %ehcleanup
 
 invoke.cont1:                                     ; preds = %invoke.cont
-  %2 = load volatile i32, i32* %i, align 4
-  invoke void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([25 x i8], [25 x i8]* @"??_C at _0BJ@OJMMAGCD@?5?5In?5outer?5_try?5i?5?$DN?5?$CFd?5?6?$AA@", i64 0, i64 0), i32 %2) #6
+  %2 = load volatile i32, ptr %i, align 4
+  invoke void (...) @"?printf@@YAXZZ"(ptr @"??_C@_0BJ@OJMMAGCD@?5?5In?5outer?5_try?5i?5?$DN?5?$CFd?5?6?$AA@", i32 %2) #6
           to label %invoke.cont2 unwind label %ehcleanup
 
 invoke.cont2:                                     ; preds = %invoke.cont1
-  %3 = load volatile i32, i32* %i, align 4
+  %3 = load volatile i32, ptr %i, align 4
   %cmp3 = icmp eq i32 %3, 0
   br i1 %cmp3, label %if.then, label %if.end
 
 if.then:                                          ; preds = %invoke.cont2
-  store volatile i32 0, i32* inttoptr (i64 17 to i32*), align 4
+  store volatile i32 0, ptr inttoptr (i64 17 to ptr), align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %invoke.cont2
@@ -77,8 +77,8 @@ if.end:                                           ; preds = %if.then, %invoke.co
           to label %invoke.cont4 unwind label %ehcleanup
 
 invoke.cont4:                                     ; preds = %if.end
-  %4 = call i8* @llvm.localaddress()
-  invoke void @"?fin$0 at 0@main@@"(i8 0, i8* %4) #6
+  %4 = call ptr @llvm.localaddress()
+  invoke void @"?fin$0 at 0@main@@"(i8 0, ptr %4) #6
           to label %invoke.cont5 unwind label %catch.dispatch
 
 invoke.cont5:                                     ; preds = %invoke.cont4
@@ -89,23 +89,23 @@ catch.dispatch:                                   ; preds = %invoke.cont5, %invo
   %5 = catchswitch within none [label %__except] unwind to caller
 
 __except:                                         ; preds = %catch.dispatch
-  %6 = catchpad within %5 [i8* null]
+  %6 = catchpad within %5 [ptr null]
   catchret from %6 to label %__except8
 
 __except8:                                        ; preds = %__except
   %7 = call i32 @llvm.eh.exceptioncode(token %6)
-  store i32 %7, i32* %__exception_code, align 4
-  %8 = load i32, i32* %i, align 4
-  call void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @"??_C@_0CG@ENDJHCGA@?5?9?9?9?5In?5outer?5except?5handler?5i?5?$DN@", i64 0, i64 0), i32 %8)
+  store i32 %7, ptr %__exception_code, align 4
+  %8 = load i32, ptr %i, align 4
+  call void (...) @"?printf@@YAXZZ"(ptr @"??_C@_0CG@ENDJHCGA@?5?9?9?9?5In?5outer?5except?5handler?5i?5?$DN@", i32 %8)
   br label %__try.cont
 
 __try.cont:                                       ; preds = %__except8, %invoke.cont7
   br label %for.inc
 
 for.inc:                                          ; preds = %__try.cont
-  %9 = load i32, i32* %i, align 4
+  %9 = load i32, ptr %i, align 4
   %inc = add nsw i32 %9, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond
 
 invoke.cont7:                                     ; preds = %invoke.cont5
@@ -113,8 +113,8 @@ invoke.cont7:                                     ; preds = %invoke.cont5
 
 ehcleanup:                                        ; preds = %if.end, %invoke.cont1, %invoke.cont
   %10 = cleanuppad within none []
-  %11 = call i8* @llvm.localaddress()
-  invoke void @"?fin$0 at 0@main@@"(i8 1, i8* %11) #6 [ "funclet"(token %10) ]
+  %11 = call ptr @llvm.localaddress()
+  invoke void @"?fin$0 at 0@main@@"(i8 1, ptr %11) #6 [ "funclet"(token %10) ]
           to label %invoke.cont6 unwind label %catch.dispatch
 
 invoke.cont6:                                     ; preds = %ehcleanup
@@ -132,29 +132,29 @@ declare dso_local void @llvm.seh.try.begin() #2
 declare dso_local i32 @__C_specific_handler(...)
 
 ; Function Attrs: noinline
-define internal void @"?fin$0 at 0@main@@"(i8 %abnormal_termination, i8* %frame_pointer) #3 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+define internal void @"?fin$0 at 0@main@@"(i8 %abnormal_termination, ptr %frame_pointer) #3 personality ptr @__C_specific_handler {
 entry:
-  %frame_pointer.addr = alloca i8*, align 8
+  %frame_pointer.addr = alloca ptr, align 8
   %abnormal_termination.addr = alloca i8, align 1
-  %0 = call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* %frame_pointer, i32 0)
-  %i = bitcast i8* %0 to i32*
-  store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
-  store i8 %abnormal_termination, i8* %abnormal_termination.addr, align 1
+  %0 = call ptr @llvm.localrecover(ptr @main, ptr %frame_pointer, i32 0)
+  %i = bitcast ptr %0 to ptr
+  store ptr %frame_pointer, ptr %frame_pointer.addr, align 8
+  store i8 %abnormal_termination, ptr %abnormal_termination.addr, align 1
   invoke void @llvm.seh.try.begin()
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %entry
-  %1 = load volatile i32, i32* %i, align 4
-  invoke void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @"??_C@_0BN@HAIIIOKI@?5?5In?5outer?5_finally?5i?5?$DN?5?$CFd?5?6?$AA@", i64 0, i64 0), i32 %1) #6
+  %1 = load volatile i32, ptr %i, align 4
+  invoke void (...) @"?printf@@YAXZZ"(ptr @"??_C@_0BN@HAIIIOKI@?5?5In?5outer?5_finally?5i?5?$DN?5?$CFd?5?6?$AA@", i32 %1) #6
           to label %invoke.cont1 unwind label %ehcleanup
 
 invoke.cont1:                                     ; preds = %invoke.cont
-  %2 = load volatile i32, i32* %i, align 4
+  %2 = load volatile i32, ptr %i, align 4
   %cmp = icmp eq i32 %2, 1
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %invoke.cont1
-  store volatile i32 0, i32* inttoptr (i64 17 to i32*), align 4
+  store volatile i32 0, ptr inttoptr (i64 17 to ptr), align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %invoke.cont1
@@ -162,35 +162,35 @@ if.end:                                           ; preds = %if.then, %invoke.co
           to label %invoke.cont2 unwind label %ehcleanup
 
 invoke.cont2:                                     ; preds = %if.end
-  call void @"?fin$1 at 0@main@@"(i8 0, i8* %frame_pointer)
+  call void @"?fin$1 at 0@main@@"(i8 0, ptr %frame_pointer)
   ret void
 
 ehcleanup:                                        ; preds = %if.end, %invoke.cont, %entry
   %3 = cleanuppad within none []
-  call void @"?fin$1 at 0@main@@"(i8 1, i8* %frame_pointer) [ "funclet"(token %3) ]
+  call void @"?fin$1 at 0@main@@"(i8 1, ptr %frame_pointer) [ "funclet"(token %3) ]
   cleanupret from %3 unwind to caller
 }
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.localrecover(i8*, i8*, i32 immarg) #4
+declare ptr @llvm.localrecover(ptr, ptr, i32 immarg) #4
 
 ; Function Attrs: noinline
-define internal void @"?fin$1 at 0@main@@"(i8 %abnormal_termination, i8* %frame_pointer) #3 {
+define internal void @"?fin$1 at 0@main@@"(i8 %abnormal_termination, ptr %frame_pointer) #3 {
 entry:
-  %frame_pointer.addr = alloca i8*, align 8
+  %frame_pointer.addr = alloca ptr, align 8
   %abnormal_termination.addr = alloca i8, align 1
-  %0 = call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* %frame_pointer, i32 0)
-  %i = bitcast i8* %0 to i32*
-  store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
-  store i8 %abnormal_termination, i8* %abnormal_termination.addr, align 1
-  %1 = load i32, i32* %i, align 4
-  call void (...) @"?printf@@YAXZZ"(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @"??_C@_0BN@HHKJHLBE@?5?5In?5Inner?5_finally?5i?5?$DN?5?$CFd?5?6?$AA@", i64 0, i64 0), i32 %1)
-  %2 = load i32, i32* %i, align 4
+  %0 = call ptr @llvm.localrecover(ptr @main, ptr %frame_pointer, i32 0)
+  %i = bitcast ptr %0 to ptr
+  store ptr %frame_pointer, ptr %frame_pointer.addr, align 8
+  store i8 %abnormal_termination, ptr %abnormal_termination.addr, align 1
+  %1 = load i32, ptr %i, align 4
+  call void (...) @"?printf@@YAXZZ"(ptr @"??_C@_0BN@HHKJHLBE@?5?5In?5Inner?5_finally?5i?5?$DN?5?$CFd?5?6?$AA@", i32 %1)
+  %2 = load i32, ptr %i, align 4
   %cmp = icmp eq i32 %2, 2
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  store volatile i32 0, i32* inttoptr (i64 17 to i32*), align 4
+  store volatile i32 0, ptr inttoptr (i64 17 to ptr), align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
@@ -201,7 +201,7 @@ if.end:                                           ; preds = %if.then, %entry
 declare dso_local void @llvm.seh.try.end() #2
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.localaddress() #4
+declare ptr @llvm.localaddress() #4
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.eh.exceptioncode(token) #4

More information about the llvm-commits mailing list