[llvm] b006b60 - [VE] Convert some tests to opaque pointers (NFC)

Nikita Popov via llvm-commits <llvm-commits@lists.llvm.org>
Mon Dec 19 04:07:02 PST 2022


Author: Nikita Popov
Date: 2022-12-19T13:06:34+01:00
New Revision: b006b60dc993b2e0ba3e412c80709477241b6be6

URL: https://github.com/llvm/llvm-project/commit/b006b60dc993b2e0ba3e412c80709477241b6be6
DIFF: https://github.com/llvm/llvm-project/commit/b006b60dc993b2e0ba3e412c80709477241b6be6.diff

LOG: [VE] Convert some tests to opaque pointers (NFC)
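
For reference, the change applied throughout is the mechanical replacement of
typed pointer types with the opaque "ptr" type; the generated VE code is
unchanged, hence NFC. A minimal before/after sketch (the function below is
illustrative only, not taken from one of the modified tests):

    ; illustrative example, not from the tests below
    ; typed pointers (before)
    define i64 @load_val(i64* %p) {
      %v = load i64, i64* %p
      ret i64 %v
    }

    ; opaque pointers (after)
    define i64 @load_val(ptr %p) {
      %v = load i64, ptr %p
      ret i64 %v
    }

Overloaded intrinsics drop the pointee type from their name mangling in the
same way (e.g. llvm.masked.load.v512f32.p0v512f32 becomes
llvm.masked.load.v512f32.p0), and pointer-to-pointer bitcasts that existed
only to satisfy the old typed-pointer signatures are removed.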

Added: 
    

Modified: 
    llvm/test/CodeGen/VE/Packed/vec_load.ll
    llvm/test/CodeGen/VE/Packed/vec_store.ll
    llvm/test/CodeGen/VE/Scalar/alloca.ll
    llvm/test/CodeGen/VE/Scalar/alloca_aligned.ll
    llvm/test/CodeGen/VE/Scalar/atomic.ll
    llvm/test/CodeGen/VE/Scalar/atomic_load.ll
    llvm/test/CodeGen/VE/Scalar/atomic_store.ll
    llvm/test/CodeGen/VE/Scalar/atomic_swap.ll
    llvm/test/CodeGen/VE/Scalar/blockaddress.ll
    llvm/test/CodeGen/VE/Scalar/br_analyze.ll
    llvm/test/CodeGen/VE/Scalar/br_jt.ll
    llvm/test/CodeGen/VE/Scalar/brind.ll
    llvm/test/CodeGen/VE/Scalar/builtin_sjlj.ll
    llvm/test/CodeGen/VE/Scalar/builtin_sjlj_bp.ll
    llvm/test/CodeGen/VE/Scalar/builtin_sjlj_callsite.ll
    llvm/test/CodeGen/VE/Scalar/builtin_sjlj_landingpad.ll
    llvm/test/CodeGen/VE/Scalar/builtin_sjlj_lsda.ll
    llvm/test/CodeGen/VE/Scalar/callstruct.ll
    llvm/test/CodeGen/VE/Scalar/fp_extload_truncstore.ll
    llvm/test/CodeGen/VE/Scalar/fp_fneg.ll
    llvm/test/CodeGen/VE/Scalar/fp_frem.ll
    llvm/test/CodeGen/VE/Scalar/frameaddr.ll
    llvm/test/CodeGen/VE/Scalar/function_prologue_epilogue.ll
    llvm/test/CodeGen/VE/Scalar/inlineasm-mem-gv.ll
    llvm/test/CodeGen/VE/Scalar/inlineasm-mem-lo.ll
    llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst-reg.ll
    llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst.ll
    llvm/test/CodeGen/VE/Scalar/lea-opt.ll
    llvm/test/CodeGen/VE/Scalar/load-align1.ll
    llvm/test/CodeGen/VE/Scalar/load-align2.ll
    llvm/test/CodeGen/VE/Scalar/load-align4.ll
    llvm/test/CodeGen/VE/Scalar/load-align8.ll
    llvm/test/CodeGen/VE/Scalar/load.ll
    llvm/test/CodeGen/VE/Scalar/load_gv.ll
    llvm/test/CodeGen/VE/Scalar/load_off.ll
    llvm/test/CodeGen/VE/Scalar/loadrri.ll
    llvm/test/CodeGen/VE/Scalar/pic_access_static_data.ll
    llvm/test/CodeGen/VE/Scalar/pic_func_call.ll
    llvm/test/CodeGen/VE/Scalar/returnaddr.ll
    llvm/test/CodeGen/VE/Scalar/sext_zext_load.ll
    llvm/test/CodeGen/VE/Scalar/stackframe_align.ll
    llvm/test/CodeGen/VE/Scalar/stackframe_call.ll
    llvm/test/CodeGen/VE/Scalar/stackframe_nocall.ll
    llvm/test/CodeGen/VE/Scalar/stackframe_size.ll
    llvm/test/CodeGen/VE/Scalar/stacksave.ll
    llvm/test/CodeGen/VE/Scalar/store-align1.ll
    llvm/test/CodeGen/VE/Scalar/store-align2.ll
    llvm/test/CodeGen/VE/Scalar/store-align4.ll
    llvm/test/CodeGen/VE/Scalar/store-align8.ll
    llvm/test/CodeGen/VE/Scalar/store.ll
    llvm/test/CodeGen/VE/Scalar/store_gv.ll
    llvm/test/CodeGen/VE/Scalar/symbol_relocation_tls.ll
    llvm/test/CodeGen/VE/Scalar/tls.ll
    llvm/test/CodeGen/VE/Scalar/truncstore.ll
    llvm/test/CodeGen/VE/Scalar/va_arg.ll
    llvm/test/CodeGen/VE/Scalar/va_callee.ll
    llvm/test/CodeGen/VE/Scalar/va_caller.ll
    llvm/test/CodeGen/VE/VELIntrinsics/lsv.ll
    llvm/test/CodeGen/VE/VELIntrinsics/lvlgen.ll
    llvm/test/CodeGen/VE/VELIntrinsics/lvm.ll
    llvm/test/CodeGen/VE/VELIntrinsics/pack.ll
    llvm/test/CodeGen/VE/VELIntrinsics/pfchv.ll
    llvm/test/CodeGen/VE/VELIntrinsics/vbrd.ll
    llvm/test/CodeGen/VE/VELIntrinsics/vld.ll
    llvm/test/CodeGen/VE/VELIntrinsics/vmv.ll
    llvm/test/CodeGen/VE/VELIntrinsics/vst.ll
    llvm/test/CodeGen/VE/Vector/loadvm.ll
    llvm/test/CodeGen/VE/Vector/loadvr.ll
    llvm/test/CodeGen/VE/Vector/storevm.ll
    llvm/test/CodeGen/VE/Vector/storevr.ll
    llvm/test/CodeGen/VE/Vector/vec_gather.ll
    llvm/test/CodeGen/VE/Vector/vec_load.ll
    llvm/test/CodeGen/VE/Vector/vec_scatter.ll
    llvm/test/CodeGen/VE/Vector/vec_store.ll
    llvm/test/CodeGen/VE/Vector/vp_gather.ll
    llvm/test/CodeGen/VE/Vector/vp_scatter.ll
    llvm/test/CodeGen/VE/Vector/vp_strided_load.ll
    llvm/test/CodeGen/VE/Vector/vp_strided_store.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/VE/Packed/vec_load.ll b/llvm/test/CodeGen/VE/Packed/vec_load.ll
index 59926371300a3..25ef749de0851 100644
--- a/llvm/test/CodeGen/VE/Packed/vec_load.ll
+++ b/llvm/test/CodeGen/VE/Packed/vec_load.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=ve-unknown-unknown -mattr=+vpu | FileCheck %s
 
-declare <512 x float> @llvm.masked.load.v512f32.p0v512f32(<512 x float>* %0, i32 immarg %1, <512 x i1> %2, <512 x float> %3) #0
+declare <512 x float> @llvm.masked.load.v512f32.p0(ptr %0, i32 immarg %1, <512 x i1> %2, <512 x float> %3) #0
 
 ; Function Attrs: nounwind
-define fastcc <512 x float> @vec_mload_v512f32(<512 x float>* %P, <512 x i1> %M) {
+define fastcc <512 x float> @vec_mload_v512f32(ptr %P, <512 x i1> %M) {
 ; CHECK-LABEL: vec_mload_v512f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -14,21 +14,21 @@ define fastcc <512 x float> @vec_mload_v512f32(<512 x float>* %P, <512 x i1> %M)
 ; CHECK-NEXT:    vldu %v1, 8, %s0
 ; CHECK-NEXT:    vshf %v0, %v1, %v0, 8
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <512 x float> @llvm.masked.load.v512f32.p0v512f32(<512 x float>* %P, i32 16, <512 x i1> %M, <512 x float> undef)
+  %r = call <512 x float> @llvm.masked.load.v512f32.p0(ptr %P, i32 16, <512 x i1> %M, <512 x float> undef)
   ret <512 x float> %r
 }
 
 ; TODO: Packed select legalization
 ; Function Attrs: nounwind
-; define fastcc <512 x float> @vec_mload_pt_v512f32(<512 x float>* %P, <512 x float> %PT, <512 x i1> %M) {
-;   %r = call <512 x float> @llvm.masked.load.v512f32.p0v512f32(<512 x float>* %P, i32 16, <512 x i1> %M, <512 x float> %PT)
+; define fastcc <512 x float> @vec_mload_pt_v512f32(ptr %P, <512 x float> %PT, <512 x i1> %M) {
+;   %r = call <512 x float> @llvm.masked.load.v512f32.p0(ptr %P, i32 16, <512 x i1> %M, <512 x float> %PT)
 ;   ret <512 x float> %r
 ; }
 
-declare <512 x i32> @llvm.masked.load.v512i32.p0v512i32(<512 x i32>* %0, i32 immarg %1, <512 x i1> %2, <512 x i32> %3) #0
+declare <512 x i32> @llvm.masked.load.v512i32.p0(ptr %0, i32 immarg %1, <512 x i1> %2, <512 x i32> %3) #0
 
 ; Function Attrs: nounwind
-define fastcc <512 x i32> @vec_mload_v512i32(<512 x i32>* %P, <512 x i1> %M) {
+define fastcc <512 x i32> @vec_mload_v512i32(ptr %P, <512 x i1> %M) {
 ; CHECK-LABEL: vec_mload_v512i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -38,14 +38,14 @@ define fastcc <512 x i32> @vec_mload_v512i32(<512 x i32>* %P, <512 x i1> %M) {
 ; CHECK-NEXT:    vldl.zx %v1, 8, %s0
 ; CHECK-NEXT:    vshf %v0, %v1, %v0, 13
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <512 x i32> @llvm.masked.load.v512i32.p0v512i32(<512 x i32>* %P, i32 16, <512 x i1> %M, <512 x i32> undef)
+  %r = call <512 x i32> @llvm.masked.load.v512i32.p0(ptr %P, i32 16, <512 x i1> %M, <512 x i32> undef)
   ret <512 x i32> %r
 }
 
 ; TODO: Packed select legalization
 ; ; Function Attrs: nounwind
-; define fastcc <512 x i32> @vec_mload_pt_v512i32(<512 x i32>* %P, <512 x i32> %PT, <512 x i1> %M) {
-;   %r = call <512 x i32> @llvm.masked.load.v512i32.p0v512i32(<512 x i32>* %P, i32 16, <512 x i1> %M, <512 x i32> %PT)
+; define fastcc <512 x i32> @vec_mload_pt_v512i32(ptr %P, <512 x i32> %PT, <512 x i1> %M) {
+;   %r = call <512 x i32> @llvm.masked.load.v512i32.p0(ptr %P, i32 16, <512 x i1> %M, <512 x i32> %PT)
 ;   ret <512 x i32> %r
 ; }
 

diff --git a/llvm/test/CodeGen/VE/Packed/vec_store.ll b/llvm/test/CodeGen/VE/Packed/vec_store.ll
index 2e8b651d694c9..0b94bc035a514 100644
--- a/llvm/test/CodeGen/VE/Packed/vec_store.ll
+++ b/llvm/test/CodeGen/VE/Packed/vec_store.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=ve-unknown-unknown -mattr=+vpu | FileCheck %s
 
-declare void @llvm.masked.store.v512f32.p0v512f32(<512 x float>, <512 x float>*, i32 immarg, <512 x i1>)
+declare void @llvm.masked.store.v512f32.p0(<512 x float>, ptr, i32 immarg, <512 x i1>)
 
-define fastcc void @vec_mstore_v512f32(<512 x float>* %P, <512 x float> %V, <512 x i1> %M) {
+define fastcc void @vec_mstore_v512f32(ptr %P, <512 x float> %V, <512 x i1> %M) {
 ; CHECK-LABEL: vec_mstore_v512f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -13,14 +13,14 @@ define fastcc void @vec_mstore_v512f32(<512 x float>* %P, <512 x float> %V, <512
 ; CHECK-NEXT:    lea %s0, 4(, %s0)
 ; CHECK-NEXT:    vstu %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.masked.store.v512f32.p0v512f32(<512 x float> %V, <512 x float>* %P, i32 16, <512 x i1> %M)
+  call void @llvm.masked.store.v512f32.p0(<512 x float> %V, ptr %P, i32 16, <512 x i1> %M)
   ret void
 }
 
 
-declare void @llvm.masked.store.v512i32.p0v512i32(<512 x i32>, <512 x i32>*, i32 immarg, <512 x i1>)
+declare void @llvm.masked.store.v512i32.p0(<512 x i32>, ptr, i32 immarg, <512 x i1>)
 
-define fastcc void @vec_mstore_v512i32(<512 x i32>* %P, <512 x i32> %V, <512 x i1> %M) {
+define fastcc void @vec_mstore_v512i32(ptr %P, <512 x i32> %V, <512 x i1> %M) {
 ; CHECK-LABEL: vec_mstore_v512i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 4(, %s0)
@@ -30,6 +30,6 @@ define fastcc void @vec_mstore_v512i32(<512 x i32>* %P, <512 x i32> %V, <512 x i
 ; CHECK-NEXT:    vshf %v0, %v0, %v0, 0
 ; CHECK-NEXT:    vstl %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.masked.store.v512i32.p0v512i32(<512 x i32> %V, <512 x i32>* %P, i32 16, <512 x i1> %M)
+  call void @llvm.masked.store.v512i32.p0(<512 x i32> %V, ptr %P, i32 16, <512 x i1> %M)
   ret void
 }

diff --git a/llvm/test/CodeGen/VE/Scalar/alloca.ll b/llvm/test/CodeGen/VE/Scalar/alloca.ll
index a4d349fefd0a4..e8f220298a919 100644
--- a/llvm/test/CodeGen/VE/Scalar/alloca.ll
+++ b/llvm/test/CodeGen/VE/Scalar/alloca.ll
@@ -1,6 +1,6 @@
 ; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
 
-declare void @bar(i8*, i64)
+declare void @bar(ptr, i64)
 
 ; Function Attrs: nounwind
 define void @test(i64 %n) {
@@ -20,6 +20,6 @@ define void @test(i64 %n) {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %dyna = alloca i8, i64 %n, align 8
-  call void @bar(i8* %dyna, i64 %n)
+  call void @bar(ptr %dyna, i64 %n)
   ret void
 }

diff --git a/llvm/test/CodeGen/VE/Scalar/alloca_aligned.ll b/llvm/test/CodeGen/VE/Scalar/alloca_aligned.ll
index 81cdcb56f792b..a1002c540c14f 100644
--- a/llvm/test/CodeGen/VE/Scalar/alloca_aligned.ll
+++ b/llvm/test/CodeGen/VE/Scalar/alloca_aligned.ll
@@ -1,6 +1,6 @@
 ; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
 
-declare void @bar(i8*, i64)
+declare void @bar(ptr, i64)
 
 ; Function Attrs: nounwind
 define void @test(i64 %n) {
@@ -24,6 +24,6 @@ define void @test(i64 %n) {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %dyna = alloca i8, i64 %n, align 32
-  call void @bar(i8* %dyna, i64 %n)
+  call void @bar(ptr %dyna, i64 %n)
   ret void
 }

diff --git a/llvm/test/CodeGen/VE/Scalar/atomic.ll b/llvm/test/CodeGen/VE/Scalar/atomic.ll
index c1d8ffc606eee..2fa6b0d7bcc1d 100644
--- a/llvm/test/CodeGen/VE/Scalar/atomic.ll
+++ b/llvm/test/CodeGen/VE/Scalar/atomic.ll
@@ -35,7 +35,7 @@ define signext i8 @test_atomic_fetch_add_1() {
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = atomicrmw add i8* @c, i8 1 seq_cst
+  %0 = atomicrmw add ptr @c, i8 1 seq_cst
   ret i8 %0
 }
 
@@ -66,7 +66,7 @@ define signext i16 @test_atomic_fetch_sub_2() {
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = atomicrmw sub i16* @s, i16 1 seq_cst
+  %0 = atomicrmw sub ptr @s, i16 1 seq_cst
   ret i16 %0
 }
 
@@ -90,7 +90,7 @@ define signext i32 @test_atomic_fetch_and_4() {
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = atomicrmw and i32* @i, i32 1 seq_cst
+  %0 = atomicrmw and ptr @i, i32 1 seq_cst
   ret i32 %0
 }
 ; Function Attrs: norecurse nounwind
@@ -112,7 +112,7 @@ define i64 @test_atomic_fetch_or_8() {
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = atomicrmw or i64* @l, i64 1 seq_cst
+  %0 = atomicrmw or ptr @l, i64 1 seq_cst
   ret i64 %0
 }
 
@@ -138,7 +138,7 @@ define signext i8 @test_atomic_fetch_xor_1() {
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = atomicrmw xor i8* @c, i8 1 seq_cst
+  %0 = atomicrmw xor ptr @c, i8 1 seq_cst
   ret i8 %0
 }
 
@@ -171,7 +171,7 @@ define signext i16 @test_atomic_fetch_nand_2() {
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = atomicrmw nand i16* @s, i16 1 seq_cst
+  %0 = atomicrmw nand ptr @s, i16 1 seq_cst
   ret i16 %0
 }
 
@@ -195,7 +195,7 @@ define signext i32 @test_atomic_fetch_max_4() {
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = atomicrmw max i32* @i, i32 1 seq_cst
+  %0 = atomicrmw max ptr @i, i32 1 seq_cst
   ret i32 %0
 }
 
@@ -219,7 +219,7 @@ define signext i32 @test_atomic_fetch_min_4() {
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = atomicrmw min i32* @i, i32 1 seq_cst
+  %0 = atomicrmw min ptr @i, i32 1 seq_cst
   ret i32 %0
 }
 
@@ -245,7 +245,7 @@ define signext i32 @test_atomic_fetch_umax_4() {
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = atomicrmw umax i32* @i, i32 1 seq_cst
+  %0 = atomicrmw umax ptr @i, i32 1 seq_cst
   ret i32 %0
 }
 
@@ -271,6 +271,6 @@ define signext i32 @test_atomic_fetch_umin_4() {
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = atomicrmw umin i32* @i, i32 1 seq_cst
+  %0 = atomicrmw umin ptr @i, i32 1 seq_cst
   ret i32 %0
 }

diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_load.ll b/llvm/test/CodeGen/VE/Scalar/atomic_load.ll
index a9d3472c3d263..82be06b8f1f44 100644
--- a/llvm/test/CodeGen/VE/Scalar/atomic_load.ll
+++ b/llvm/test/CodeGen/VE/Scalar/atomic_load.ll
@@ -75,109 +75,100 @@
 @gv_u128 = global %"struct.std::__1::atomic.45" zeroinitializer, align 16
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i1 @_Z22atomic_load_relaxed_i1RNSt3__16atomicIbEE(%"struct.std::__1::atomic"* nocapture nonnull readonly align 1 dereferenceable(1) %0) {
+define zeroext i1 @_Z22atomic_load_relaxed_i1RNSt3__16atomicIbEE(ptr nocapture nonnull readonly align 1 dereferenceable(1) %0) {
 ; CHECK-LABEL: _Z22atomic_load_relaxed_i1RNSt3__16atomicIbEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    and %s0, 1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %0, i64 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i8, i8* %2 monotonic, align 1
-  %4 = and i8 %3, 1
-  %5 = icmp ne i8 %4, 0
-  ret i1 %5
+  %2 = load atomic i8, ptr %0 monotonic, align 1
+  %3 = and i8 %2, 1
+  %4 = icmp ne i8 %3, 0
+  ret i1 %4
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i8 @_Z22atomic_load_relaxed_i8RNSt3__16atomicIcEE(%"struct.std::__1::atomic.0"* nocapture nonnull readonly align 1 dereferenceable(1) %0) {
+define signext i8 @_Z22atomic_load_relaxed_i8RNSt3__16atomicIcEE(ptr nocapture nonnull readonly align 1 dereferenceable(1) %0) {
 ; CHECK-LABEL: _Z22atomic_load_relaxed_i8RNSt3__16atomicIcEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld1b.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i8, i8* %2 monotonic, align 1
-  ret i8 %3
+  %2 = load atomic i8, ptr %0 monotonic, align 1
+  ret i8 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i8 @_Z22atomic_load_relaxed_u8RNSt3__16atomicIhEE(%"struct.std::__1::atomic.5"* nocapture nonnull readonly align 1 dereferenceable(1) %0) {
+define zeroext i8 @_Z22atomic_load_relaxed_u8RNSt3__16atomicIhEE(ptr nocapture nonnull readonly align 1 dereferenceable(1) %0) {
 ; CHECK-LABEL: _Z22atomic_load_relaxed_u8RNSt3__16atomicIhEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i8, i8* %2 monotonic, align 1
-  ret i8 %3
+  %2 = load atomic i8, ptr %0 monotonic, align 1
+  ret i8 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i16 @_Z23atomic_load_relaxed_i16RNSt3__16atomicIsEE(%"struct.std::__1::atomic.10"* nocapture nonnull readonly align 2 dereferenceable(2) %0) {
+define signext i16 @_Z23atomic_load_relaxed_i16RNSt3__16atomicIsEE(ptr nocapture nonnull readonly align 2 dereferenceable(2) %0) {
 ; CHECK-LABEL: _Z23atomic_load_relaxed_i16RNSt3__16atomicIsEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld2b.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i16, i16* %2 monotonic, align 2
-  ret i16 %3
+  %2 = load atomic i16, ptr %0 monotonic, align 2
+  ret i16 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i16 @_Z23atomic_load_relaxed_u16RNSt3__16atomicItEE(%"struct.std::__1::atomic.15"* nocapture nonnull readonly align 2 dereferenceable(2) %0) {
+define zeroext i16 @_Z23atomic_load_relaxed_u16RNSt3__16atomicItEE(ptr nocapture nonnull readonly align 2 dereferenceable(2) %0) {
 ; CHECK-LABEL: _Z23atomic_load_relaxed_u16RNSt3__16atomicItEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i16, i16* %2 monotonic, align 2
-  ret i16 %3
+  %2 = load atomic i16, ptr %0 monotonic, align 2
+  ret i16 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i32 @_Z23atomic_load_relaxed_i32RNSt3__16atomicIiEE(%"struct.std::__1::atomic.20"* nocapture nonnull readonly align 4 dereferenceable(4) %0) {
+define signext i32 @_Z23atomic_load_relaxed_i32RNSt3__16atomicIiEE(ptr nocapture nonnull readonly align 4 dereferenceable(4) %0) {
 ; CHECK-LABEL: _Z23atomic_load_relaxed_i32RNSt3__16atomicIiEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ldl.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i32, i32* %2 monotonic, align 4
-  ret i32 %3
+  %2 = load atomic i32, ptr %0 monotonic, align 4
+  ret i32 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i32 @_Z23atomic_load_relaxed_u32RNSt3__16atomicIjEE(%"struct.std::__1::atomic.25"* nocapture nonnull readonly align 4 dereferenceable(4) %0) {
+define zeroext i32 @_Z23atomic_load_relaxed_u32RNSt3__16atomicIjEE(ptr nocapture nonnull readonly align 4 dereferenceable(4) %0) {
 ; CHECK-LABEL: _Z23atomic_load_relaxed_u32RNSt3__16atomicIjEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ldl.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i32, i32* %2 monotonic, align 4
-  ret i32 %3
+  %2 = load atomic i32, ptr %0 monotonic, align 4
+  ret i32 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define i64 @_Z23atomic_load_relaxed_i64RNSt3__16atomicIlEE(%"struct.std::__1::atomic.30"* nocapture nonnull readonly align 8 dereferenceable(8) %0) {
+define i64 @_Z23atomic_load_relaxed_i64RNSt3__16atomicIlEE(ptr nocapture nonnull readonly align 8 dereferenceable(8) %0) {
 ; CHECK-LABEL: _Z23atomic_load_relaxed_i64RNSt3__16atomicIlEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i64, i64* %2 monotonic, align 8
-  ret i64 %3
+  %2 = load atomic i64, ptr %0 monotonic, align 8
+  ret i64 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define i64 @_Z23atomic_load_relaxed_u64RNSt3__16atomicImEE(%"struct.std::__1::atomic.35"* nocapture nonnull readonly align 8 dereferenceable(8) %0) {
+define i64 @_Z23atomic_load_relaxed_u64RNSt3__16atomicImEE(ptr nocapture nonnull readonly align 8 dereferenceable(8) %0) {
 ; CHECK-LABEL: _Z23atomic_load_relaxed_u64RNSt3__16atomicImEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i64, i64* %2 monotonic, align 8
-  ret i64 %3
+  %2 = load atomic i64, ptr %0 monotonic, align 8
+  ret i64 %2
 }
 
 ; Function Attrs: nofree nounwind mustprogress
-define i128 @_Z24atomic_load_relaxed_i128RNSt3__16atomicInEE(%"struct.std::__1::atomic.40"* nonnull align 16 dereferenceable(16) %0) {
+define i128 @_Z24atomic_load_relaxed_i128RNSt3__16atomicInEE(ptr nonnull align 16 dereferenceable(16) %0) {
 ; CHECK-LABEL: _Z24atomic_load_relaxed_i128RNSt3__16atomicInEE:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s1, 0, %s0
@@ -192,17 +183,15 @@ define i128 @_Z24atomic_load_relaxed_i128RNSt3__16atomicInEE(%"struct.std::__1::
 ; CHECK-NEXT:    ld %s0, 240(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %2 = alloca i128, align 16
-  %3 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %3)
-  %4 = bitcast %"struct.std::__1::atomic.40"* %0 to i8*
-  call void @__atomic_load(i64 16, i8* nonnull %4, i8* nonnull %3, i32 signext 0)
-  %5 = load i128, i128* %2, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %3)
-  ret i128 %5
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  call void @__atomic_load(i64 16, ptr nonnull %0, ptr nonnull %2, i32 signext 0)
+  %3 = load i128, ptr %2, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  ret i128 %3
 }
 
 ; Function Attrs: nofree nounwind mustprogress
-define i128 @_Z24atomic_load_relaxed_u128RNSt3__16atomicIoEE(%"struct.std::__1::atomic.45"* nonnull align 16 dereferenceable(16) %0) {
+define i128 @_Z24atomic_load_relaxed_u128RNSt3__16atomicIoEE(ptr nonnull align 16 dereferenceable(16) %0) {
 ; CHECK-LABEL: _Z24atomic_load_relaxed_u128RNSt3__16atomicIoEE:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s1, 0, %s0
@@ -217,128 +206,117 @@ define i128 @_Z24atomic_load_relaxed_u128RNSt3__16atomicIoEE(%"struct.std::__1::
 ; CHECK-NEXT:    ld %s0, 240(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %2 = alloca i128, align 16
-  %3 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %3)
-  %4 = bitcast %"struct.std::__1::atomic.45"* %0 to i8*
-  call void @__atomic_load(i64 16, i8* nonnull %4, i8* nonnull %3, i32 signext 0)
-  %5 = load i128, i128* %2, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %3)
-  ret i128 %5
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  call void @__atomic_load(i64 16, ptr nonnull %0, ptr nonnull %2, i32 signext 0)
+  %3 = load i128, ptr %2, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  ret i128 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i1 @_Z22atomic_load_acquire_i1RNSt3__16atomicIbEE(%"struct.std::__1::atomic"* nocapture nonnull readonly align 1 dereferenceable(1) %0) {
+define zeroext i1 @_Z22atomic_load_acquire_i1RNSt3__16atomicIbEE(ptr nocapture nonnull readonly align 1 dereferenceable(1) %0) {
 ; CHECK-LABEL: _Z22atomic_load_acquire_i1RNSt3__16atomicIbEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    and %s0, 1, %s0
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %0, i64 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i8, i8* %2 acquire, align 1
-  %4 = and i8 %3, 1
-  %5 = icmp ne i8 %4, 0
-  ret i1 %5
+  %2 = load atomic i8, ptr %0 acquire, align 1
+  %3 = and i8 %2, 1
+  %4 = icmp ne i8 %3, 0
+  ret i1 %4
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i8 @_Z22atomic_load_acquire_i8RNSt3__16atomicIcEE(%"struct.std::__1::atomic.0"* nocapture nonnull readonly align 1 dereferenceable(1) %0) {
+define signext i8 @_Z22atomic_load_acquire_i8RNSt3__16atomicIcEE(ptr nocapture nonnull readonly align 1 dereferenceable(1) %0) {
 ; CHECK-LABEL: _Z22atomic_load_acquire_i8RNSt3__16atomicIcEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld1b.sx %s0, (, %s0)
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i8, i8* %2 acquire, align 1
-  ret i8 %3
+  %2 = load atomic i8, ptr %0 acquire, align 1
+  ret i8 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i8 @_Z22atomic_load_acquire_u8RNSt3__16atomicIhEE(%"struct.std::__1::atomic.5"* nocapture nonnull readonly align 1 dereferenceable(1) %0) {
+define zeroext i8 @_Z22atomic_load_acquire_u8RNSt3__16atomicIhEE(ptr nocapture nonnull readonly align 1 dereferenceable(1) %0) {
 ; CHECK-LABEL: _Z22atomic_load_acquire_u8RNSt3__16atomicIhEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i8, i8* %2 acquire, align 1
-  ret i8 %3
+  %2 = load atomic i8, ptr %0 acquire, align 1
+  ret i8 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i16 @_Z23atomic_load_acquire_i16RNSt3__16atomicIsEE(%"struct.std::__1::atomic.10"* nocapture nonnull readonly align 2 dereferenceable(2) %0) {
+define signext i16 @_Z23atomic_load_acquire_i16RNSt3__16atomicIsEE(ptr nocapture nonnull readonly align 2 dereferenceable(2) %0) {
 ; CHECK-LABEL: _Z23atomic_load_acquire_i16RNSt3__16atomicIsEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld2b.sx %s0, (, %s0)
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i16, i16* %2 acquire, align 2
-  ret i16 %3
+  %2 = load atomic i16, ptr %0 acquire, align 2
+  ret i16 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i16 @_Z23atomic_load_acquire_u16RNSt3__16atomicItEE(%"struct.std::__1::atomic.15"* nocapture nonnull readonly align 2 dereferenceable(2) %0) {
+define zeroext i16 @_Z23atomic_load_acquire_u16RNSt3__16atomicItEE(ptr nocapture nonnull readonly align 2 dereferenceable(2) %0) {
 ; CHECK-LABEL: _Z23atomic_load_acquire_u16RNSt3__16atomicItEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i16, i16* %2 acquire, align 2
-  ret i16 %3
+  %2 = load atomic i16, ptr %0 acquire, align 2
+  ret i16 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i32 @_Z23atomic_load_acquire_i32RNSt3__16atomicIiEE(%"struct.std::__1::atomic.20"* nocapture nonnull readonly align 4 dereferenceable(4) %0) {
+define signext i32 @_Z23atomic_load_acquire_i32RNSt3__16atomicIiEE(ptr nocapture nonnull readonly align 4 dereferenceable(4) %0) {
 ; CHECK-LABEL: _Z23atomic_load_acquire_i32RNSt3__16atomicIiEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ldl.sx %s0, (, %s0)
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i32, i32* %2 acquire, align 4
-  ret i32 %3
+  %2 = load atomic i32, ptr %0 acquire, align 4
+  ret i32 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i32 @_Z23atomic_load_acquire_u32RNSt3__16atomicIjEE(%"struct.std::__1::atomic.25"* nocapture nonnull readonly align 4 dereferenceable(4) %0) {
+define zeroext i32 @_Z23atomic_load_acquire_u32RNSt3__16atomicIjEE(ptr nocapture nonnull readonly align 4 dereferenceable(4) %0) {
 ; CHECK-LABEL: _Z23atomic_load_acquire_u32RNSt3__16atomicIjEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ldl.zx %s0, (, %s0)
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i32, i32* %2 acquire, align 4
-  ret i32 %3
+  %2 = load atomic i32, ptr %0 acquire, align 4
+  ret i32 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define i64 @_Z23atomic_load_acquire_i64RNSt3__16atomicIlEE(%"struct.std::__1::atomic.30"* nocapture nonnull readonly align 8 dereferenceable(8) %0) {
+define i64 @_Z23atomic_load_acquire_i64RNSt3__16atomicIlEE(ptr nocapture nonnull readonly align 8 dereferenceable(8) %0) {
 ; CHECK-LABEL: _Z23atomic_load_acquire_i64RNSt3__16atomicIlEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i64, i64* %2 acquire, align 8
-  ret i64 %3
+  %2 = load atomic i64, ptr %0 acquire, align 8
+  ret i64 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define i64 @_Z23atomic_load_acquire_u64RNSt3__16atomicImEE(%"struct.std::__1::atomic.35"* nocapture nonnull readonly align 8 dereferenceable(8) %0) {
+define i64 @_Z23atomic_load_acquire_u64RNSt3__16atomicImEE(ptr nocapture nonnull readonly align 8 dereferenceable(8) %0) {
 ; CHECK-LABEL: _Z23atomic_load_acquire_u64RNSt3__16atomicImEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i64, i64* %2 acquire, align 8
-  ret i64 %3
+  %2 = load atomic i64, ptr %0 acquire, align 8
+  ret i64 %2
 }
 
 ; Function Attrs: nofree nounwind mustprogress
-define i128 @_Z24atomic_load_acquire_i128RNSt3__16atomicInEE(%"struct.std::__1::atomic.40"* nonnull align 16 dereferenceable(16) %0) {
+define i128 @_Z24atomic_load_acquire_i128RNSt3__16atomicInEE(ptr nonnull align 16 dereferenceable(16) %0) {
 ; CHECK-LABEL: _Z24atomic_load_acquire_i128RNSt3__16atomicInEE:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s1, 0, %s0
@@ -353,17 +331,15 @@ define i128 @_Z24atomic_load_acquire_i128RNSt3__16atomicInEE(%"struct.std::__1::
 ; CHECK-NEXT:    ld %s0, 240(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %2 = alloca i128, align 16
-  %3 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %3)
-  %4 = bitcast %"struct.std::__1::atomic.40"* %0 to i8*
-  call void @__atomic_load(i64 16, i8* nonnull %4, i8* nonnull %3, i32 signext 2)
-  %5 = load i128, i128* %2, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %3)
-  ret i128 %5
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  call void @__atomic_load(i64 16, ptr nonnull %0, ptr nonnull %2, i32 signext 2)
+  %3 = load i128, ptr %2, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  ret i128 %3
 }
 
 ; Function Attrs: nofree nounwind mustprogress
-define i128 @_Z24atomic_load_acquire_u128RNSt3__16atomicIoEE(%"struct.std::__1::atomic.45"* nonnull align 16 dereferenceable(16) %0) {
+define i128 @_Z24atomic_load_acquire_u128RNSt3__16atomicIoEE(ptr nonnull align 16 dereferenceable(16) %0) {
 ; CHECK-LABEL: _Z24atomic_load_acquire_u128RNSt3__16atomicIoEE:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s1, 0, %s0
@@ -378,128 +354,117 @@ define i128 @_Z24atomic_load_acquire_u128RNSt3__16atomicIoEE(%"struct.std::__1::
 ; CHECK-NEXT:    ld %s0, 240(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %2 = alloca i128, align 16
-  %3 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %3)
-  %4 = bitcast %"struct.std::__1::atomic.45"* %0 to i8*
-  call void @__atomic_load(i64 16, i8* nonnull %4, i8* nonnull %3, i32 signext 2)
-  %5 = load i128, i128* %2, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %3)
-  ret i128 %5
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  call void @__atomic_load(i64 16, ptr nonnull %0, ptr nonnull %2, i32 signext 2)
+  %3 = load i128, ptr %2, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  ret i128 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i1 @_Z22atomic_load_seq_cst_i1RNSt3__16atomicIbEE(%"struct.std::__1::atomic"* nocapture nonnull readonly align 1 dereferenceable(1) %0) {
+define zeroext i1 @_Z22atomic_load_seq_cst_i1RNSt3__16atomicIbEE(ptr nocapture nonnull readonly align 1 dereferenceable(1) %0) {
 ; CHECK-LABEL: _Z22atomic_load_seq_cst_i1RNSt3__16atomicIbEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    and %s0, 1, %s0
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %0, i64 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i8, i8* %2 seq_cst, align 1
-  %4 = and i8 %3, 1
-  %5 = icmp ne i8 %4, 0
-  ret i1 %5
+  %2 = load atomic i8, ptr %0 seq_cst, align 1
+  %3 = and i8 %2, 1
+  %4 = icmp ne i8 %3, 0
+  ret i1 %4
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i8 @_Z22atomic_load_seq_cst_i8RNSt3__16atomicIcEE(%"struct.std::__1::atomic.0"* nocapture nonnull readonly align 1 dereferenceable(1) %0) {
+define signext i8 @_Z22atomic_load_seq_cst_i8RNSt3__16atomicIcEE(ptr nocapture nonnull readonly align 1 dereferenceable(1) %0) {
 ; CHECK-LABEL: _Z22atomic_load_seq_cst_i8RNSt3__16atomicIcEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld1b.sx %s0, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i8, i8* %2 seq_cst, align 1
-  ret i8 %3
+  %2 = load atomic i8, ptr %0 seq_cst, align 1
+  ret i8 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i8 @_Z22atomic_load_seq_cst_u8RNSt3__16atomicIhEE(%"struct.std::__1::atomic.5"* nocapture nonnull readonly align 1 dereferenceable(1) %0) {
+define zeroext i8 @_Z22atomic_load_seq_cst_u8RNSt3__16atomicIhEE(ptr nocapture nonnull readonly align 1 dereferenceable(1) %0) {
 ; CHECK-LABEL: _Z22atomic_load_seq_cst_u8RNSt3__16atomicIhEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i8, i8* %2 seq_cst, align 1
-  ret i8 %3
+  %2 = load atomic i8, ptr %0 seq_cst, align 1
+  ret i8 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i16 @_Z23atomic_load_seq_cst_i16RNSt3__16atomicIsEE(%"struct.std::__1::atomic.10"* nocapture nonnull readonly align 2 dereferenceable(2) %0) {
+define signext i16 @_Z23atomic_load_seq_cst_i16RNSt3__16atomicIsEE(ptr nocapture nonnull readonly align 2 dereferenceable(2) %0) {
 ; CHECK-LABEL: _Z23atomic_load_seq_cst_i16RNSt3__16atomicIsEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld2b.sx %s0, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i16, i16* %2 seq_cst, align 2
-  ret i16 %3
+  %2 = load atomic i16, ptr %0 seq_cst, align 2
+  ret i16 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i16 @_Z23atomic_load_seq_cst_u16RNSt3__16atomicItEE(%"struct.std::__1::atomic.15"* nocapture nonnull readonly align 2 dereferenceable(2) %0) {
+define zeroext i16 @_Z23atomic_load_seq_cst_u16RNSt3__16atomicItEE(ptr nocapture nonnull readonly align 2 dereferenceable(2) %0) {
 ; CHECK-LABEL: _Z23atomic_load_seq_cst_u16RNSt3__16atomicItEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i16, i16* %2 seq_cst, align 2
-  ret i16 %3
+  %2 = load atomic i16, ptr %0 seq_cst, align 2
+  ret i16 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i32 @_Z23atomic_load_seq_cst_i32RNSt3__16atomicIiEE(%"struct.std::__1::atomic.20"* nocapture nonnull readonly align 4 dereferenceable(4) %0) {
+define signext i32 @_Z23atomic_load_seq_cst_i32RNSt3__16atomicIiEE(ptr nocapture nonnull readonly align 4 dereferenceable(4) %0) {
 ; CHECK-LABEL: _Z23atomic_load_seq_cst_i32RNSt3__16atomicIiEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ldl.sx %s0, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i32, i32* %2 seq_cst, align 4
-  ret i32 %3
+  %2 = load atomic i32, ptr %0 seq_cst, align 4
+  ret i32 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i32 @_Z23atomic_load_seq_cst_u32RNSt3__16atomicIjEE(%"struct.std::__1::atomic.25"* nocapture nonnull readonly align 4 dereferenceable(4) %0) {
+define zeroext i32 @_Z23atomic_load_seq_cst_u32RNSt3__16atomicIjEE(ptr nocapture nonnull readonly align 4 dereferenceable(4) %0) {
 ; CHECK-LABEL: _Z23atomic_load_seq_cst_u32RNSt3__16atomicIjEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ldl.zx %s0, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i32, i32* %2 seq_cst, align 4
-  ret i32 %3
+  %2 = load atomic i32, ptr %0 seq_cst, align 4
+  ret i32 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define i64 @_Z23atomic_load_seq_cst_i64RNSt3__16atomicIlEE(%"struct.std::__1::atomic.30"* nocapture nonnull readonly align 8 dereferenceable(8) %0) {
+define i64 @_Z23atomic_load_seq_cst_i64RNSt3__16atomicIlEE(ptr nocapture nonnull readonly align 8 dereferenceable(8) %0) {
 ; CHECK-LABEL: _Z23atomic_load_seq_cst_i64RNSt3__16atomicIlEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i64, i64* %2 seq_cst, align 8
-  ret i64 %3
+  %2 = load atomic i64, ptr %0 seq_cst, align 8
+  ret i64 %2
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define i64 @_Z23atomic_load_seq_cst_u64RNSt3__16atomicImEE(%"struct.std::__1::atomic.35"* nocapture nonnull readonly align 8 dereferenceable(8) %0) {
+define i64 @_Z23atomic_load_seq_cst_u64RNSt3__16atomicImEE(ptr nocapture nonnull readonly align 8 dereferenceable(8) %0) {
 ; CHECK-LABEL: _Z23atomic_load_seq_cst_u64RNSt3__16atomicImEE:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %3 = load atomic i64, i64* %2 seq_cst, align 8
-  ret i64 %3
+  %2 = load atomic i64, ptr %0 seq_cst, align 8
+  ret i64 %2
 }
 
 ; Function Attrs: nofree nounwind mustprogress
-define i128 @_Z24atomic_load_seq_cst_i128RNSt3__16atomicInEE(%"struct.std::__1::atomic.40"* nonnull align 16 dereferenceable(16) %0) {
+define i128 @_Z24atomic_load_seq_cst_i128RNSt3__16atomicInEE(ptr nonnull align 16 dereferenceable(16) %0) {
 ; CHECK-LABEL: _Z24atomic_load_seq_cst_i128RNSt3__16atomicInEE:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s1, 0, %s0
@@ -514,17 +479,15 @@ define i128 @_Z24atomic_load_seq_cst_i128RNSt3__16atomicInEE(%"struct.std::__1::
 ; CHECK-NEXT:    ld %s0, 240(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %2 = alloca i128, align 16
-  %3 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %3)
-  %4 = bitcast %"struct.std::__1::atomic.40"* %0 to i8*
-  call void @__atomic_load(i64 16, i8* nonnull %4, i8* nonnull %3, i32 signext 5)
-  %5 = load i128, i128* %2, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %3)
-  ret i128 %5
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  call void @__atomic_load(i64 16, ptr nonnull %0, ptr nonnull %2, i32 signext 5)
+  %3 = load i128, ptr %2, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  ret i128 %3
 }
 
 ; Function Attrs: nofree nounwind mustprogress
-define i128 @_Z24atomic_load_seq_cst_u128RNSt3__16atomicIoEE(%"struct.std::__1::atomic.45"* nonnull align 16 dereferenceable(16) %0) {
+define i128 @_Z24atomic_load_seq_cst_u128RNSt3__16atomicIoEE(ptr nonnull align 16 dereferenceable(16) %0) {
 ; CHECK-LABEL: _Z24atomic_load_seq_cst_u128RNSt3__16atomicIoEE:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s1, 0, %s0
@@ -539,13 +502,11 @@ define i128 @_Z24atomic_load_seq_cst_u128RNSt3__16atomicIoEE(%"struct.std::__1::
 ; CHECK-NEXT:    ld %s0, 240(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %2 = alloca i128, align 16
-  %3 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %3)
-  %4 = bitcast %"struct.std::__1::atomic.45"* %0 to i8*
-  call void @__atomic_load(i64 16, i8* nonnull %4, i8* nonnull %3, i32 signext 5)
-  %5 = load i128, i128* %2, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %3)
-  ret i128 %5
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  call void @__atomic_load(i64 16, ptr nonnull %0, ptr nonnull %2, i32 signext 5)
+  %3 = load i128, ptr %2, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  ret i128 %3
 }
 
 ; Function Attrs: mustprogress
@@ -561,23 +522,22 @@ define zeroext i1 @_Z26atomic_load_relaxed_stk_i1v() {
 ; CHECK-NEXT:    and %s0, 1, %s0
 ; CHECK-NEXT:    or %s11, 0, %s9
   %1 = alloca %"struct.std::__1::atomic", align 1
-  %2 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %1, i64 0, i32 0, i32 0, i32 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %2)
-  call void @_Z6fun_i1RNSt3__16atomicIbEE(%"struct.std::__1::atomic"* nonnull align 1 dereferenceable(1) %1)
-  %3 = load atomic i8, i8* %2 monotonic, align 1
-  %4 = and i8 %3, 1
-  %5 = icmp ne i8 %4, 0
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %2)
-  ret i1 %5
+  call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+  call void @_Z6fun_i1RNSt3__16atomicIbEE(ptr nonnull align 1 dereferenceable(1) %1)
+  %2 = load atomic i8, ptr %1 monotonic, align 1
+  %3 = and i8 %2, 1
+  %4 = icmp ne i8 %3, 0
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+  ret i1 %4
 }
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-declare void @_Z6fun_i1RNSt3__16atomicIbEE(%"struct.std::__1::atomic"* nonnull align 1 dereferenceable(1))
+declare void @_Z6fun_i1RNSt3__16atomicIbEE(ptr nonnull align 1 dereferenceable(1))
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 ; Function Attrs: mustprogress
 define signext i8 @_Z26atomic_load_relaxed_stk_i8v() {
@@ -591,15 +551,14 @@ define signext i8 @_Z26atomic_load_relaxed_stk_i8v() {
 ; CHECK-NEXT:    ld1b.sx %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %1 = alloca %"struct.std::__1::atomic.0", align 1
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %1, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %2)
-  call void @_Z6fun_i8RNSt3__16atomicIcEE(%"struct.std::__1::atomic.0"* nonnull align 1 dereferenceable(1) %1)
-  %3 = load atomic i8, i8* %2 monotonic, align 1
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %2)
-  ret i8 %3
+  call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+  call void @_Z6fun_i8RNSt3__16atomicIcEE(ptr nonnull align 1 dereferenceable(1) %1)
+  %2 = load atomic i8, ptr %1 monotonic, align 1
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+  ret i8 %2
 }
 
-declare void @_Z6fun_i8RNSt3__16atomicIcEE(%"struct.std::__1::atomic.0"* nonnull align 1 dereferenceable(1))
+declare void @_Z6fun_i8RNSt3__16atomicIcEE(ptr nonnull align 1 dereferenceable(1))
 
 ; Function Attrs: mustprogress
 define zeroext i8 @_Z26atomic_load_relaxed_stk_u8v() {
@@ -613,15 +572,14 @@ define zeroext i8 @_Z26atomic_load_relaxed_stk_u8v() {
 ; CHECK-NEXT:    ld1b.zx %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %1 = alloca %"struct.std::__1::atomic.5", align 1
-  %2 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %1, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %2)
-  call void @_Z6fun_u8RNSt3__16atomicIhEE(%"struct.std::__1::atomic.5"* nonnull align 1 dereferenceable(1) %1)
-  %3 = load atomic i8, i8* %2 monotonic, align 1
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %2)
-  ret i8 %3
+  call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+  call void @_Z6fun_u8RNSt3__16atomicIhEE(ptr nonnull align 1 dereferenceable(1) %1)
+  %2 = load atomic i8, ptr %1 monotonic, align 1
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+  ret i8 %2
 }
 
-declare void @_Z6fun_u8RNSt3__16atomicIhEE(%"struct.std::__1::atomic.5"* nonnull align 1 dereferenceable(1))
+declare void @_Z6fun_u8RNSt3__16atomicIhEE(ptr nonnull align 1 dereferenceable(1))
 
 ; Function Attrs: mustprogress
 define signext i16 @_Z27atomic_load_relaxed_stk_i16v() {
@@ -635,16 +593,14 @@ define signext i16 @_Z27atomic_load_relaxed_stk_i16v() {
 ; CHECK-NEXT:    ld2b.sx %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %1 = alloca %"struct.std::__1::atomic.10", align 2
-  %2 = bitcast %"struct.std::__1::atomic.10"* %1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %2)
-  call void @_Z7fun_i16RNSt3__16atomicIsEE(%"struct.std::__1::atomic.10"* nonnull align 2 dereferenceable(2) %1)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %1, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = load atomic i16, i16* %3 monotonic, align 2
-  call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull %2)
-  ret i16 %4
+  call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %1)
+  call void @_Z7fun_i16RNSt3__16atomicIsEE(ptr nonnull align 2 dereferenceable(2) %1)
+  %2 = load atomic i16, ptr %1 monotonic, align 2
+  call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %1)
+  ret i16 %2
 }
 
-declare void @_Z7fun_i16RNSt3__16atomicIsEE(%"struct.std::__1::atomic.10"* nonnull align 2 dereferenceable(2))
+declare void @_Z7fun_i16RNSt3__16atomicIsEE(ptr nonnull align 2 dereferenceable(2))
 
 ; Function Attrs: mustprogress
 define zeroext i16 @_Z27atomic_load_relaxed_stk_u16v() {
@@ -658,16 +614,14 @@ define zeroext i16 @_Z27atomic_load_relaxed_stk_u16v() {
 ; CHECK-NEXT:    ld2b.zx %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %1 = alloca %"struct.std::__1::atomic.15", align 2
-  %2 = bitcast %"struct.std::__1::atomic.15"* %1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %2)
-  call void @_Z7fun_u16RNSt3__16atomicItEE(%"struct.std::__1::atomic.15"* nonnull align 2 dereferenceable(2) %1)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %1, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = load atomic i16, i16* %3 monotonic, align 2
-  call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull %2)
-  ret i16 %4
+  call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %1)
+  call void @_Z7fun_u16RNSt3__16atomicItEE(ptr nonnull align 2 dereferenceable(2) %1)
+  %2 = load atomic i16, ptr %1 monotonic, align 2
+  call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %1)
+  ret i16 %2
 }
 
-declare void @_Z7fun_u16RNSt3__16atomicItEE(%"struct.std::__1::atomic.15"* nonnull align 2 dereferenceable(2))
+declare void @_Z7fun_u16RNSt3__16atomicItEE(ptr nonnull align 2 dereferenceable(2))
 
 ; Function Attrs: mustprogress
 define signext i32 @_Z27atomic_load_relaxed_stk_i32v() {
@@ -681,16 +635,14 @@ define signext i32 @_Z27atomic_load_relaxed_stk_i32v() {
 ; CHECK-NEXT:    ldl.sx %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %1 = alloca %"struct.std::__1::atomic.20", align 4
-  %2 = bitcast %"struct.std::__1::atomic.20"* %1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2)
-  call void @_Z7fun_i32RNSt3__16atomicIiEE(%"struct.std::__1::atomic.20"* nonnull align 4 dereferenceable(4) %1)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %1, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = load atomic i32, i32* %3 monotonic, align 4
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %2)
-  ret i32 %4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1)
+  call void @_Z7fun_i32RNSt3__16atomicIiEE(ptr nonnull align 4 dereferenceable(4) %1)
+  %2 = load atomic i32, ptr %1 monotonic, align 4
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %1)
+  ret i32 %2
 }
 
-declare void @_Z7fun_i32RNSt3__16atomicIiEE(%"struct.std::__1::atomic.20"* nonnull align 4 dereferenceable(4))
+declare void @_Z7fun_i32RNSt3__16atomicIiEE(ptr nonnull align 4 dereferenceable(4))
 
 ; Function Attrs: mustprogress
 define zeroext i32 @_Z27atomic_load_relaxed_stk_u32v() {
@@ -704,16 +656,14 @@ define zeroext i32 @_Z27atomic_load_relaxed_stk_u32v() {
 ; CHECK-NEXT:    ldl.zx %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %1 = alloca %"struct.std::__1::atomic.25", align 4
-  %2 = bitcast %"struct.std::__1::atomic.25"* %1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2)
-  call void @_Z7fun_u32RNSt3__16atomicIjEE(%"struct.std::__1::atomic.25"* nonnull align 4 dereferenceable(4) %1)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %1, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = load atomic i32, i32* %3 monotonic, align 4
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %2)
-  ret i32 %4
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1)
+  call void @_Z7fun_u32RNSt3__16atomicIjEE(ptr nonnull align 4 dereferenceable(4) %1)
+  %2 = load atomic i32, ptr %1 monotonic, align 4
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %1)
+  ret i32 %2
 }
 
-declare void @_Z7fun_u32RNSt3__16atomicIjEE(%"struct.std::__1::atomic.25"* nonnull align 4 dereferenceable(4))
+declare void @_Z7fun_u32RNSt3__16atomicIjEE(ptr nonnull align 4 dereferenceable(4))
 
 ; Function Attrs: mustprogress
 define i64 @_Z27atomic_load_relaxed_stk_i64v() {
@@ -727,16 +677,14 @@ define i64 @_Z27atomic_load_relaxed_stk_i64v() {
 ; CHECK-NEXT:    ld %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %1 = alloca %"struct.std::__1::atomic.30", align 8
-  %2 = bitcast %"struct.std::__1::atomic.30"* %1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %2)
-  call void @_Z7fun_i64RNSt3__16atomicIlEE(%"struct.std::__1::atomic.30"* nonnull align 8 dereferenceable(8) %1)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* %1, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = load atomic i64, i64* %3 monotonic, align 8
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %2)
-  ret i64 %4
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %1)
+  call void @_Z7fun_i64RNSt3__16atomicIlEE(ptr nonnull align 8 dereferenceable(8) %1)
+  %2 = load atomic i64, ptr %1 monotonic, align 8
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %1)
+  ret i64 %2
 }
 
-declare void @_Z7fun_i64RNSt3__16atomicIlEE(%"struct.std::__1::atomic.30"* nonnull align 8 dereferenceable(8))
+declare void @_Z7fun_i64RNSt3__16atomicIlEE(ptr nonnull align 8 dereferenceable(8))
 
 ; Function Attrs: mustprogress
 define i64 @_Z27atomic_load_relaxed_stk_u64v() {
@@ -750,16 +698,14 @@ define i64 @_Z27atomic_load_relaxed_stk_u64v() {
 ; CHECK-NEXT:    ld %s0, 248(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %1 = alloca %"struct.std::__1::atomic.35", align 8
-  %2 = bitcast %"struct.std::__1::atomic.35"* %1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %2)
-  call void @_Z7fun_u64RNSt3__16atomicImEE(%"struct.std::__1::atomic.35"* nonnull align 8 dereferenceable(8) %1)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* %1, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = load atomic i64, i64* %3 monotonic, align 8
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %2)
-  ret i64 %4
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %1)
+  call void @_Z7fun_u64RNSt3__16atomicImEE(ptr nonnull align 8 dereferenceable(8) %1)
+  %2 = load atomic i64, ptr %1 monotonic, align 8
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %1)
+  ret i64 %2
 }
 
-declare void @_Z7fun_u64RNSt3__16atomicImEE(%"struct.std::__1::atomic.35"* nonnull align 8 dereferenceable(8))
+declare void @_Z7fun_u64RNSt3__16atomicImEE(ptr nonnull align 8 dereferenceable(8))
 
 ; Function Attrs: mustprogress
 define i128 @_Z28atomic_load_relaxed_stk_i128v() {
@@ -783,19 +729,17 @@ define i128 @_Z28atomic_load_relaxed_stk_i128v() {
 ; CHECK-NEXT:    or %s11, 0, %s9
   %1 = alloca i128, align 16
   %2 = alloca %"struct.std::__1::atomic.40", align 16
-  %3 = bitcast %"struct.std::__1::atomic.40"* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %3)
-  call void @_Z8fun_i128RNSt3__16atomicInEE(%"struct.std::__1::atomic.40"* nonnull align 16 dereferenceable(16) %2)
-  %4 = bitcast i128* %1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %4)
-  call void @__atomic_load(i64 16, i8* nonnull %3, i8* nonnull %4, i32 signext 0)
-  %5 = load i128, i128* %1, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %4)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %3)
-  ret i128 %5
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  call void @_Z8fun_i128RNSt3__16atomicInEE(ptr nonnull align 16 dereferenceable(16) %2)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %1)
+  call void @__atomic_load(i64 16, ptr nonnull %2, ptr nonnull %1, i32 signext 0)
+  %3 = load i128, ptr %1, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %1)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  ret i128 %3
 }
 
-declare void @_Z8fun_i128RNSt3__16atomicInEE(%"struct.std::__1::atomic.40"* nonnull align 16 dereferenceable(16))
+declare void @_Z8fun_i128RNSt3__16atomicInEE(ptr nonnull align 16 dereferenceable(16))
 
 ; Function Attrs: mustprogress
 define i128 @_Z28atomic_load_relaxed_stk_u128v() {
@@ -819,19 +763,17 @@ define i128 @_Z28atomic_load_relaxed_stk_u128v() {
 ; CHECK-NEXT:    or %s11, 0, %s9
   %1 = alloca i128, align 16
   %2 = alloca %"struct.std::__1::atomic.45", align 16
-  %3 = bitcast %"struct.std::__1::atomic.45"* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %3)
-  call void @_Z8fun_u128RNSt3__16atomicIoEE(%"struct.std::__1::atomic.45"* nonnull align 16 dereferenceable(16) %2)
-  %4 = bitcast i128* %1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %4)
-  call void @__atomic_load(i64 16, i8* nonnull %3, i8* nonnull %4, i32 signext 0)
-  %5 = load i128, i128* %1, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %4)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %3)
-  ret i128 %5
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  call void @_Z8fun_u128RNSt3__16atomicIoEE(ptr nonnull align 16 dereferenceable(16) %2)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %1)
+  call void @__atomic_load(i64 16, ptr nonnull %2, ptr nonnull %1, i32 signext 0)
+  %3 = load i128, ptr %1, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %1)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  ret i128 %3
 }
 
-declare void @_Z8fun_u128RNSt3__16atomicIoEE(%"struct.std::__1::atomic.45"* nonnull align 16 dereferenceable(16))
+declare void @_Z8fun_u128RNSt3__16atomicIoEE(ptr nonnull align 16 dereferenceable(16))
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
 define zeroext i1 @_Z25atomic_load_relaxed_gv_i1v() {
@@ -843,7 +785,7 @@ define zeroext i1 @_Z25atomic_load_relaxed_gv_i1v() {
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    and %s0, 1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load atomic i8, i8* getelementptr inbounds (%"struct.std::__1::atomic", %"struct.std::__1::atomic"* @gv_i1, i64 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  %1 = load atomic i8, ptr @gv_i1 monotonic, align 4
   %2 = and i8 %1, 1
   %3 = icmp ne i8 %2, 0
   ret i1 %3
@@ -858,7 +800,7 @@ define signext i8 @_Z25atomic_load_relaxed_gv_i8v() {
 ; CHECK-NEXT:    lea.sl %s0, gv_i8@hi(, %s0)
 ; CHECK-NEXT:    ld1b.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load atomic i8, i8* getelementptr inbounds (%"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* @gv_i8, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  %1 = load atomic i8, ptr @gv_i8 monotonic, align 4
   ret i8 %1
 }
 
@@ -871,7 +813,7 @@ define zeroext i8 @_Z25atomic_load_relaxed_gv_u8v() {
 ; CHECK-NEXT:    lea.sl %s0, gv_u8@hi(, %s0)
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load atomic i8, i8* getelementptr inbounds (%"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* @gv_u8, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  %1 = load atomic i8, ptr @gv_u8 monotonic, align 4
   ret i8 %1
 }
 
@@ -884,7 +826,7 @@ define signext i16 @_Z26atomic_load_relaxed_gv_i16v() {
 ; CHECK-NEXT:    lea.sl %s0, gv_i16@hi(, %s0)
 ; CHECK-NEXT:    ld2b.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load atomic i16, i16* getelementptr inbounds (%"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* @gv_i16, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  %1 = load atomic i16, ptr @gv_i16 monotonic, align 4
   ret i16 %1
 }
 
@@ -897,7 +839,7 @@ define zeroext i16 @_Z26atomic_load_relaxed_gv_u16v() {
 ; CHECK-NEXT:    lea.sl %s0, gv_u16@hi(, %s0)
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load atomic i16, i16* getelementptr inbounds (%"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* @gv_u16, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  %1 = load atomic i16, ptr @gv_u16 monotonic, align 4
   ret i16 %1
 }
 
@@ -910,7 +852,7 @@ define signext i32 @_Z26atomic_load_relaxed_gv_i32v() {
 ; CHECK-NEXT:    lea.sl %s0, gv_i32@hi(, %s0)
 ; CHECK-NEXT:    ldl.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load atomic i32, i32* getelementptr inbounds (%"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* @gv_i32, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  %1 = load atomic i32, ptr @gv_i32 monotonic, align 4
   ret i32 %1
 }
 
@@ -923,7 +865,7 @@ define zeroext i32 @_Z26atomic_load_relaxed_gv_u32v() {
 ; CHECK-NEXT:    lea.sl %s0, gv_u32@hi(, %s0)
 ; CHECK-NEXT:    ldl.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load atomic i32, i32* getelementptr inbounds (%"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* @gv_u32, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  %1 = load atomic i32, ptr @gv_u32 monotonic, align 4
   ret i32 %1
 }
 
@@ -936,7 +878,7 @@ define i64 @_Z26atomic_load_relaxed_gv_i64v() {
 ; CHECK-NEXT:    lea.sl %s0, gv_i64@hi(, %s0)
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load atomic i64, i64* getelementptr inbounds (%"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* @gv_i64, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 8
+  %1 = load atomic i64, ptr @gv_i64 monotonic, align 8
   ret i64 %1
 }
 
@@ -949,7 +891,7 @@ define i64 @_Z26atomic_load_relaxed_gv_u64v() {
 ; CHECK-NEXT:    lea.sl %s0, gv_u64@hi(, %s0)
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load atomic i64, i64* getelementptr inbounds (%"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* @gv_u64, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 8
+  %1 = load atomic i64, ptr @gv_u64 monotonic, align 8
   ret i64 %1
 }
 
@@ -971,12 +913,11 @@ define i128 @_Z27atomic_load_relaxed_gv_i128v() {
 ; CHECK-NEXT:    ld %s0, 240(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %1 = alloca i128, align 16
-  %2 = bitcast i128* %1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %2)
-  call void @__atomic_load(i64 16, i8* nonnull bitcast (%"struct.std::__1::atomic.40"* @gv_i128 to i8*), i8* nonnull %2, i32 signext 0)
-  %3 = load i128, i128* %1, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %2)
-  ret i128 %3
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %1)
+  call void @__atomic_load(i64 16, ptr nonnull @gv_i128, ptr nonnull %1, i32 signext 0)
+  %2 = load i128, ptr %1, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %1)
+  ret i128 %2
 }
 
 ; Function Attrs: nofree nounwind mustprogress
@@ -997,16 +938,15 @@ define i128 @_Z27atomic_load_relaxed_gv_u128v() {
 ; CHECK-NEXT:    ld %s0, 240(, %s11)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %1 = alloca i128, align 16
-  %2 = bitcast i128* %1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %2)
-  call void @__atomic_load(i64 16, i8* nonnull bitcast (%"struct.std::__1::atomic.45"* @gv_u128 to i8*), i8* nonnull %2, i32 signext 0)
-  %3 = load i128, i128* %1, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %2)
-  ret i128 %3
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %1)
+  call void @__atomic_load(i64 16, ptr nonnull @gv_u128, ptr nonnull %1, i32 signext 0)
+  %2 = load i128, ptr %1, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %1)
+  ret i128 %2
 }
 
 ; Function Attrs: nofree nounwind willreturn
-declare void @__atomic_load(i64, i8*, i8*, i32)
+declare void @__atomic_load(i64, ptr, ptr, i32)
 
 !2 = !{!3, !3, i64 0}
 !3 = !{!"__int128", !4, i64 0}

diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_store.ll b/llvm/test/CodeGen/VE/Scalar/atomic_store.ll
index f7323f2f7dd5e..eb7c4d5954a81 100644
--- a/llvm/test/CodeGen/VE/Scalar/atomic_store.ll
+++ b/llvm/test/CodeGen/VE/Scalar/atomic_store.ll
@@ -75,107 +75,98 @@
 @gv_u128 = global %"struct.std::__1::atomic.45" zeroinitializer, align 16
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z23atomic_store_relaxed_i1RNSt3__16atomicIbEEb(%"struct.std::__1::atomic"* nocapture nonnull align 1 dereferenceable(1) %0, i1 zeroext %1) {
+define void @_Z23atomic_store_relaxed_i1RNSt3__16atomicIbEEb(ptr nocapture nonnull align 1 dereferenceable(1) %0, i1 zeroext %1) {
 ; CHECK-LABEL: _Z23atomic_store_relaxed_i1RNSt3__16atomicIbEEb:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st1b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = zext i1 %1 to i8
-  %4 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %0, i64 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i8 %3, i8* %4 monotonic, align 1
+  store atomic i8 %3, ptr %0 monotonic, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z23atomic_store_relaxed_i8RNSt3__16atomicIcEEc(%"struct.std::__1::atomic.0"* nocapture nonnull align 1 dereferenceable(1) %0, i8 signext %1) {
+define void @_Z23atomic_store_relaxed_i8RNSt3__16atomicIcEEc(ptr nocapture nonnull align 1 dereferenceable(1) %0, i8 signext %1) {
 ; CHECK-LABEL: _Z23atomic_store_relaxed_i8RNSt3__16atomicIcEEc:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st1b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i8 %1, i8* %3 monotonic, align 1
+  store atomic i8 %1, ptr %0 monotonic, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z23atomic_store_relaxed_u8RNSt3__16atomicIhEEh(%"struct.std::__1::atomic.5"* nocapture nonnull align 1 dereferenceable(1) %0, i8 zeroext %1) {
+define void @_Z23atomic_store_relaxed_u8RNSt3__16atomicIhEEh(ptr nocapture nonnull align 1 dereferenceable(1) %0, i8 zeroext %1) {
 ; CHECK-LABEL: _Z23atomic_store_relaxed_u8RNSt3__16atomicIhEEh:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st1b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i8 %1, i8* %3 monotonic, align 1
+  store atomic i8 %1, ptr %0 monotonic, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_relaxed_i16RNSt3__16atomicIsEEs(%"struct.std::__1::atomic.10"* nocapture nonnull align 2 dereferenceable(2) %0, i16 signext %1) {
+define void @_Z24atomic_store_relaxed_i16RNSt3__16atomicIsEEs(ptr nocapture nonnull align 2 dereferenceable(2) %0, i16 signext %1) {
 ; CHECK-LABEL: _Z24atomic_store_relaxed_i16RNSt3__16atomicIsEEs:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st2b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i16 %1, i16* %3 monotonic, align 2
+  store atomic i16 %1, ptr %0 monotonic, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_relaxed_u16RNSt3__16atomicItEEt(%"struct.std::__1::atomic.15"* nocapture nonnull align 2 dereferenceable(2) %0, i16 zeroext %1) {
+define void @_Z24atomic_store_relaxed_u16RNSt3__16atomicItEEt(ptr nocapture nonnull align 2 dereferenceable(2) %0, i16 zeroext %1) {
 ; CHECK-LABEL: _Z24atomic_store_relaxed_u16RNSt3__16atomicItEEt:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st2b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i16 %1, i16* %3 monotonic, align 2
+  store atomic i16 %1, ptr %0 monotonic, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_relaxed_i32RNSt3__16atomicIiEEi(%"struct.std::__1::atomic.20"* nocapture nonnull align 4 dereferenceable(4) %0, i32 signext %1) {
+define void @_Z24atomic_store_relaxed_i32RNSt3__16atomicIiEEi(ptr nocapture nonnull align 4 dereferenceable(4) %0, i32 signext %1) {
 ; CHECK-LABEL: _Z24atomic_store_relaxed_i32RNSt3__16atomicIiEEi:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stl %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i32 %1, i32* %3 monotonic, align 4
+  store atomic i32 %1, ptr %0 monotonic, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_relaxed_u32RNSt3__16atomicIjEEj(%"struct.std::__1::atomic.25"* nocapture nonnull align 4 dereferenceable(4) %0, i32 zeroext %1) {
+define void @_Z24atomic_store_relaxed_u32RNSt3__16atomicIjEEj(ptr nocapture nonnull align 4 dereferenceable(4) %0, i32 zeroext %1) {
 ; CHECK-LABEL: _Z24atomic_store_relaxed_u32RNSt3__16atomicIjEEj:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stl %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i32 %1, i32* %3 monotonic, align 4
+  store atomic i32 %1, ptr %0 monotonic, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_relaxed_i64RNSt3__16atomicIlEEl(%"struct.std::__1::atomic.30"* nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
+define void @_Z24atomic_store_relaxed_i64RNSt3__16atomicIlEEl(ptr nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
 ; CHECK-LABEL: _Z24atomic_store_relaxed_i64RNSt3__16atomicIlEEl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i64 %1, i64* %3 monotonic, align 8
+  store atomic i64 %1, ptr %0 monotonic, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_relaxed_u64RNSt3__16atomicImEEm(%"struct.std::__1::atomic.35"* nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
+define void @_Z24atomic_store_relaxed_u64RNSt3__16atomicImEEm(ptr nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
 ; CHECK-LABEL: _Z24atomic_store_relaxed_u64RNSt3__16atomicImEEm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i64 %1, i64* %3 monotonic, align 8
+  store atomic i64 %1, ptr %0 monotonic, align 8
   ret void
 }
 
 ; Function Attrs: nofree nounwind mustprogress
-define void @_Z25atomic_store_relaxed_i128RNSt3__16atomicInEEn(%"struct.std::__1::atomic.40"* nonnull align 16 dereferenceable(16) %0, i128 %1) {
+define void @_Z25atomic_store_relaxed_i128RNSt3__16atomicInEEn(ptr nonnull align 16 dereferenceable(16) %0, i128 %1) {
 ; CHECK-LABEL: _Z25atomic_store_relaxed_i128RNSt3__16atomicInEEn:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s4, 0, %s0
@@ -191,17 +182,15 @@ define void @_Z25atomic_store_relaxed_i128RNSt3__16atomicInEEn(%"struct.std::__1
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %3 = alloca i128, align 16
-  %4 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %4)
-  store i128 %1, i128* %3, align 16, !tbaa !2
-  %5 = bitcast %"struct.std::__1::atomic.40"* %0 to i8*
-  call void @__atomic_store(i64 16, i8* nonnull %5, i8* nonnull %4, i32 signext 0)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %4)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  store i128 %1, ptr %3, align 16, !tbaa !2
+  call void @__atomic_store(i64 16, ptr nonnull %0, ptr nonnull %3, i32 signext 0)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
   ret void
 }
 
 ; Function Attrs: nofree nounwind mustprogress
-define void @_Z25atomic_store_relaxed_u128RNSt3__16atomicIoEEo(%"struct.std::__1::atomic.45"* nonnull align 16 dereferenceable(16) %0, i128 %1) {
+define void @_Z25atomic_store_relaxed_u128RNSt3__16atomicIoEEo(ptr nonnull align 16 dereferenceable(16) %0, i128 %1) {
 ; CHECK-LABEL: _Z25atomic_store_relaxed_u128RNSt3__16atomicIoEEo:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s4, 0, %s0
@@ -217,126 +206,115 @@ define void @_Z25atomic_store_relaxed_u128RNSt3__16atomicIoEEo(%"struct.std::__1
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %3 = alloca i128, align 16
-  %4 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %4)
-  store i128 %1, i128* %3, align 16, !tbaa !2
-  %5 = bitcast %"struct.std::__1::atomic.45"* %0 to i8*
-  call void @__atomic_store(i64 16, i8* nonnull %5, i8* nonnull %4, i32 signext 0)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %4)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  store i128 %1, ptr %3, align 16, !tbaa !2
+  call void @__atomic_store(i64 16, ptr nonnull %0, ptr nonnull %3, i32 signext 0)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z23atomic_store_release_i1RNSt3__16atomicIbEEb(%"struct.std::__1::atomic"* nocapture nonnull align 1 dereferenceable(1) %0, i1 zeroext %1) {
+define void @_Z23atomic_store_release_i1RNSt3__16atomicIbEEb(ptr nocapture nonnull align 1 dereferenceable(1) %0, i1 zeroext %1) {
 ; CHECK-LABEL: _Z23atomic_store_release_i1RNSt3__16atomicIbEEb:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 1
 ; CHECK-NEXT:    st1b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = zext i1 %1 to i8
-  %4 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %0, i64 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i8 %3, i8* %4 release, align 1
+  store atomic i8 %3, ptr %0 release, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z23atomic_store_release_i8RNSt3__16atomicIcEEc(%"struct.std::__1::atomic.0"* nocapture nonnull align 1 dereferenceable(1) %0, i8 signext %1) {
+define void @_Z23atomic_store_release_i8RNSt3__16atomicIcEEc(ptr nocapture nonnull align 1 dereferenceable(1) %0, i8 signext %1) {
 ; CHECK-LABEL: _Z23atomic_store_release_i8RNSt3__16atomicIcEEc:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 1
 ; CHECK-NEXT:    st1b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i8 %1, i8* %3 release, align 1
+  store atomic i8 %1, ptr %0 release, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z23atomic_store_release_u8RNSt3__16atomicIhEEh(%"struct.std::__1::atomic.5"* nocapture nonnull align 1 dereferenceable(1) %0, i8 zeroext %1) {
+define void @_Z23atomic_store_release_u8RNSt3__16atomicIhEEh(ptr nocapture nonnull align 1 dereferenceable(1) %0, i8 zeroext %1) {
 ; CHECK-LABEL: _Z23atomic_store_release_u8RNSt3__16atomicIhEEh:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 1
 ; CHECK-NEXT:    st1b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i8 %1, i8* %3 release, align 1
+  store atomic i8 %1, ptr %0 release, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_release_i16RNSt3__16atomicIsEEs(%"struct.std::__1::atomic.10"* nocapture nonnull align 2 dereferenceable(2) %0, i16 signext %1) {
+define void @_Z24atomic_store_release_i16RNSt3__16atomicIsEEs(ptr nocapture nonnull align 2 dereferenceable(2) %0, i16 signext %1) {
 ; CHECK-LABEL: _Z24atomic_store_release_i16RNSt3__16atomicIsEEs:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 1
 ; CHECK-NEXT:    st2b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i16 %1, i16* %3 release, align 2
+  store atomic i16 %1, ptr %0 release, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_release_u16RNSt3__16atomicItEEt(%"struct.std::__1::atomic.15"* nocapture nonnull align 2 dereferenceable(2) %0, i16 zeroext %1) {
+define void @_Z24atomic_store_release_u16RNSt3__16atomicItEEt(ptr nocapture nonnull align 2 dereferenceable(2) %0, i16 zeroext %1) {
 ; CHECK-LABEL: _Z24atomic_store_release_u16RNSt3__16atomicItEEt:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 1
 ; CHECK-NEXT:    st2b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i16 %1, i16* %3 release, align 2
+  store atomic i16 %1, ptr %0 release, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_release_i32RNSt3__16atomicIiEEi(%"struct.std::__1::atomic.20"* nocapture nonnull align 4 dereferenceable(4) %0, i32 signext %1) {
+define void @_Z24atomic_store_release_i32RNSt3__16atomicIiEEi(ptr nocapture nonnull align 4 dereferenceable(4) %0, i32 signext %1) {
 ; CHECK-LABEL: _Z24atomic_store_release_i32RNSt3__16atomicIiEEi:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 1
 ; CHECK-NEXT:    stl %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i32 %1, i32* %3 release, align 4
+  store atomic i32 %1, ptr %0 release, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_release_u32RNSt3__16atomicIjEEj(%"struct.std::__1::atomic.25"* nocapture nonnull align 4 dereferenceable(4) %0, i32 zeroext %1) {
+define void @_Z24atomic_store_release_u32RNSt3__16atomicIjEEj(ptr nocapture nonnull align 4 dereferenceable(4) %0, i32 zeroext %1) {
 ; CHECK-LABEL: _Z24atomic_store_release_u32RNSt3__16atomicIjEEj:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 1
 ; CHECK-NEXT:    stl %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i32 %1, i32* %3 release, align 4
+  store atomic i32 %1, ptr %0 release, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_release_i64RNSt3__16atomicIlEEl(%"struct.std::__1::atomic.30"* nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
+define void @_Z24atomic_store_release_i64RNSt3__16atomicIlEEl(ptr nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
 ; CHECK-LABEL: _Z24atomic_store_release_i64RNSt3__16atomicIlEEl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 1
 ; CHECK-NEXT:    st %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i64 %1, i64* %3 release, align 8
+  store atomic i64 %1, ptr %0 release, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_release_u64RNSt3__16atomicImEEm(%"struct.std::__1::atomic.35"* nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
+define void @_Z24atomic_store_release_u64RNSt3__16atomicImEEm(ptr nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
 ; CHECK-LABEL: _Z24atomic_store_release_u64RNSt3__16atomicImEEm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 1
 ; CHECK-NEXT:    st %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i64 %1, i64* %3 release, align 8
+  store atomic i64 %1, ptr %0 release, align 8
   ret void
 }
 
 ; Function Attrs: nofree nounwind mustprogress
-define void @_Z25atomic_store_release_i128RNSt3__16atomicInEEn(%"struct.std::__1::atomic.40"* nonnull align 16 dereferenceable(16) %0, i128 %1) {
+define void @_Z25atomic_store_release_i128RNSt3__16atomicInEEn(ptr nonnull align 16 dereferenceable(16) %0, i128 %1) {
 ; CHECK-LABEL: _Z25atomic_store_release_i128RNSt3__16atomicInEEn:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s4, 0, %s0
@@ -352,17 +330,15 @@ define void @_Z25atomic_store_release_i128RNSt3__16atomicInEEn(%"struct.std::__1
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %3 = alloca i128, align 16
-  %4 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %4)
-  store i128 %1, i128* %3, align 16, !tbaa !2
-  %5 = bitcast %"struct.std::__1::atomic.40"* %0 to i8*
-  call void @__atomic_store(i64 16, i8* nonnull %5, i8* nonnull %4, i32 signext 3)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %4)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  store i128 %1, ptr %3, align 16, !tbaa !2
+  call void @__atomic_store(i64 16, ptr nonnull %0, ptr nonnull %3, i32 signext 3)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
   ret void
 }
 
 ; Function Attrs: nofree nounwind mustprogress
-define void @_Z25atomic_store_release_u128RNSt3__16atomicIoEEo(%"struct.std::__1::atomic.45"* nonnull align 16 dereferenceable(16) %0, i128 %1) {
+define void @_Z25atomic_store_release_u128RNSt3__16atomicIoEEo(ptr nonnull align 16 dereferenceable(16) %0, i128 %1) {
 ; CHECK-LABEL: _Z25atomic_store_release_u128RNSt3__16atomicIoEEo:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s4, 0, %s0
@@ -378,17 +354,15 @@ define void @_Z25atomic_store_release_u128RNSt3__16atomicIoEEo(%"struct.std::__1
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %3 = alloca i128, align 16
-  %4 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %4)
-  store i128 %1, i128* %3, align 16, !tbaa !2
-  %5 = bitcast %"struct.std::__1::atomic.45"* %0 to i8*
-  call void @__atomic_store(i64 16, i8* nonnull %5, i8* nonnull %4, i32 signext 3)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %4)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  store i128 %1, ptr %3, align 16, !tbaa !2
+  call void @__atomic_store(i64 16, ptr nonnull %0, ptr nonnull %3, i32 signext 3)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z23atomic_store_seq_cst_i1RNSt3__16atomicIbEEb(%"struct.std::__1::atomic"* nocapture nonnull align 1 dereferenceable(1) %0, i1 zeroext %1) {
+define void @_Z23atomic_store_seq_cst_i1RNSt3__16atomicIbEEb(ptr nocapture nonnull align 1 dereferenceable(1) %0, i1 zeroext %1) {
 ; CHECK-LABEL: _Z23atomic_store_seq_cst_i1RNSt3__16atomicIbEEb:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
@@ -396,117 +370,108 @@ define void @_Z23atomic_store_seq_cst_i1RNSt3__16atomicIbEEb(%"struct.std::__1::
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = zext i1 %1 to i8
-  %4 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %0, i64 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i8 %3, i8* %4 seq_cst, align 1
+  store atomic i8 %3, ptr %0 seq_cst, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z23atomic_store_seq_cst_i8RNSt3__16atomicIcEEc(%"struct.std::__1::atomic.0"* nocapture nonnull align 1 dereferenceable(1) %0, i8 signext %1) {
+define void @_Z23atomic_store_seq_cst_i8RNSt3__16atomicIcEEc(ptr nocapture nonnull align 1 dereferenceable(1) %0, i8 signext %1) {
 ; CHECK-LABEL: _Z23atomic_store_seq_cst_i8RNSt3__16atomicIcEEc:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    st1b %s1, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i8 %1, i8* %3 seq_cst, align 1
+  store atomic i8 %1, ptr %0 seq_cst, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z23atomic_store_seq_cst_u8RNSt3__16atomicIhEEh(%"struct.std::__1::atomic.5"* nocapture nonnull align 1 dereferenceable(1) %0, i8 zeroext %1) {
+define void @_Z23atomic_store_seq_cst_u8RNSt3__16atomicIhEEh(ptr nocapture nonnull align 1 dereferenceable(1) %0, i8 zeroext %1) {
 ; CHECK-LABEL: _Z23atomic_store_seq_cst_u8RNSt3__16atomicIhEEh:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    st1b %s1, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i8 %1, i8* %3 seq_cst, align 1
+  store atomic i8 %1, ptr %0 seq_cst, align 1
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_seq_cst_i16RNSt3__16atomicIsEEs(%"struct.std::__1::atomic.10"* nocapture nonnull align 2 dereferenceable(2) %0, i16 signext %1) {
+define void @_Z24atomic_store_seq_cst_i16RNSt3__16atomicIsEEs(ptr nocapture nonnull align 2 dereferenceable(2) %0, i16 signext %1) {
 ; CHECK-LABEL: _Z24atomic_store_seq_cst_i16RNSt3__16atomicIsEEs:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    st2b %s1, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i16 %1, i16* %3 seq_cst, align 2
+  store atomic i16 %1, ptr %0 seq_cst, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_seq_cst_u16RNSt3__16atomicItEEt(%"struct.std::__1::atomic.15"* nocapture nonnull align 2 dereferenceable(2) %0, i16 zeroext %1) {
+define void @_Z24atomic_store_seq_cst_u16RNSt3__16atomicItEEt(ptr nocapture nonnull align 2 dereferenceable(2) %0, i16 zeroext %1) {
 ; CHECK-LABEL: _Z24atomic_store_seq_cst_u16RNSt3__16atomicItEEt:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    st2b %s1, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i16 %1, i16* %3 seq_cst, align 2
+  store atomic i16 %1, ptr %0 seq_cst, align 2
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_seq_cst_i32RNSt3__16atomicIiEEi(%"struct.std::__1::atomic.20"* nocapture nonnull align 4 dereferenceable(4) %0, i32 signext %1) {
+define void @_Z24atomic_store_seq_cst_i32RNSt3__16atomicIiEEi(ptr nocapture nonnull align 4 dereferenceable(4) %0, i32 signext %1) {
 ; CHECK-LABEL: _Z24atomic_store_seq_cst_i32RNSt3__16atomicIiEEi:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    stl %s1, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i32 %1, i32* %3 seq_cst, align 4
+  store atomic i32 %1, ptr %0 seq_cst, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_seq_cst_u32RNSt3__16atomicIjEEj(%"struct.std::__1::atomic.25"* nocapture nonnull align 4 dereferenceable(4) %0, i32 zeroext %1) {
+define void @_Z24atomic_store_seq_cst_u32RNSt3__16atomicIjEEj(ptr nocapture nonnull align 4 dereferenceable(4) %0, i32 zeroext %1) {
 ; CHECK-LABEL: _Z24atomic_store_seq_cst_u32RNSt3__16atomicIjEEj:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    stl %s1, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i32 %1, i32* %3 seq_cst, align 4
+  store atomic i32 %1, ptr %0 seq_cst, align 4
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_seq_cst_i64RNSt3__16atomicIlEEl(%"struct.std::__1::atomic.30"* nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
+define void @_Z24atomic_store_seq_cst_i64RNSt3__16atomicIlEEl(ptr nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
 ; CHECK-LABEL: _Z24atomic_store_seq_cst_i64RNSt3__16atomicIlEEl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    st %s1, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i64 %1, i64* %3 seq_cst, align 8
+  store atomic i64 %1, ptr %0 seq_cst, align 8
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define void @_Z24atomic_store_seq_cst_u64RNSt3__16atomicImEEm(%"struct.std::__1::atomic.35"* nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
+define void @_Z24atomic_store_seq_cst_u64RNSt3__16atomicImEEm(ptr nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
 ; CHECK-LABEL: _Z24atomic_store_seq_cst_u64RNSt3__16atomicImEEm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    st %s1, (, %s0)
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  store atomic i64 %1, i64* %3 seq_cst, align 8
+  store atomic i64 %1, ptr %0 seq_cst, align 8
   ret void
 }
 
 ; Function Attrs: nofree nounwind mustprogress
-define void @_Z25atomic_store_seq_cst_i128RNSt3__16atomicInEEn(%"struct.std::__1::atomic.40"* nonnull align 16 dereferenceable(16) %0, i128 %1) {
+define void @_Z25atomic_store_seq_cst_i128RNSt3__16atomicInEEn(ptr nonnull align 16 dereferenceable(16) %0, i128 %1) {
 ; CHECK-LABEL: _Z25atomic_store_seq_cst_i128RNSt3__16atomicInEEn:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s4, 0, %s0
@@ -522,17 +487,15 @@ define void @_Z25atomic_store_seq_cst_i128RNSt3__16atomicInEEn(%"struct.std::__1
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %3 = alloca i128, align 16
-  %4 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %4)
-  store i128 %1, i128* %3, align 16, !tbaa !2
-  %5 = bitcast %"struct.std::__1::atomic.40"* %0 to i8*
-  call void @__atomic_store(i64 16, i8* nonnull %5, i8* nonnull %4, i32 signext 5)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %4)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  store i128 %1, ptr %3, align 16, !tbaa !2
+  call void @__atomic_store(i64 16, ptr nonnull %0, ptr nonnull %3, i32 signext 5)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
   ret void
 }
 
 ; Function Attrs: nofree nounwind mustprogress
-define void @_Z25atomic_store_seq_cst_u128RNSt3__16atomicIoEEo(%"struct.std::__1::atomic.45"* nonnull align 16 dereferenceable(16) %0, i128 %1) {
+define void @_Z25atomic_store_seq_cst_u128RNSt3__16atomicIoEEo(ptr nonnull align 16 dereferenceable(16) %0, i128 %1) {
 ; CHECK-LABEL: _Z25atomic_store_seq_cst_u128RNSt3__16atomicIoEEo:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s4, 0, %s0
@@ -548,12 +511,10 @@ define void @_Z25atomic_store_seq_cst_u128RNSt3__16atomicIoEEo(%"struct.std::__1
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %3 = alloca i128, align 16
-  %4 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %4)
-  store i128 %1, i128* %3, align 16, !tbaa !2
-  %5 = bitcast %"struct.std::__1::atomic.45"* %0 to i8*
-  call void @__atomic_store(i64 16, i8* nonnull %5, i8* nonnull %4, i32 signext 5)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %4)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  store i128 %1, ptr %3, align 16, !tbaa !2
+  call void @__atomic_store(i64 16, ptr nonnull %0, ptr nonnull %3, i32 signext 5)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
   ret void
 }
 
@@ -565,18 +526,18 @@ define void @_Z26atomic_load_relaxed_stk_i1b(i1 zeroext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca i8, align 1
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %2)
+  call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %2)
   %3 = zext i1 %0 to i8
-  store atomic volatile i8 %3, i8* %2 monotonic, align 1
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %2)
+  store atomic volatile i8 %3, ptr %2 monotonic, align 1
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %2)
   ret void
 }
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 ; Function Attrs: nofree nounwind mustprogress
 define void @_Z26atomic_load_relaxed_stk_i8c(i8 signext %0) {
@@ -586,9 +547,9 @@ define void @_Z26atomic_load_relaxed_stk_i8c(i8 signext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca i8, align 1
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %2)
-  store atomic volatile i8 %0, i8* %2 monotonic, align 1
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %2)
+  call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %2)
+  store atomic volatile i8 %0, ptr %2 monotonic, align 1
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %2)
   ret void
 }
 
@@ -600,9 +561,9 @@ define void @_Z26atomic_load_relaxed_stk_u8h(i8 zeroext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca i8, align 1
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %2)
-  store atomic volatile i8 %0, i8* %2 monotonic, align 1
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %2)
+  call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %2)
+  store atomic volatile i8 %0, ptr %2 monotonic, align 1
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %2)
   ret void
 }
 
@@ -614,10 +575,9 @@ define void @_Z27atomic_load_relaxed_stk_i16s(i16 signext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca i16, align 2
-  %3 = bitcast i16* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %3)
-  store atomic volatile i16 %0, i16* %2 monotonic, align 2
-  call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %2)
+  store atomic volatile i16 %0, ptr %2 monotonic, align 2
+  call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %2)
   ret void
 }
 
@@ -629,10 +589,9 @@ define void @_Z27atomic_load_relaxed_stk_u16t(i16 zeroext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca i16, align 2
-  %3 = bitcast i16* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %3)
-  store atomic volatile i16 %0, i16* %2 monotonic, align 2
-  call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %2)
+  store atomic volatile i16 %0, ptr %2 monotonic, align 2
+  call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %2)
   ret void
 }
 
@@ -644,10 +603,9 @@ define void @_Z27atomic_load_relaxed_stk_i32i(i32 signext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca i32, align 4
-  %3 = bitcast i32* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %3)
-  store atomic volatile i32 %0, i32* %2 monotonic, align 4
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %2)
+  store atomic volatile i32 %0, ptr %2 monotonic, align 4
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %2)
   ret void
 }
 
@@ -659,10 +617,9 @@ define void @_Z27atomic_load_relaxed_stk_u32j(i32 zeroext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca i32, align 4
-  %3 = bitcast i32* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %3)
-  store atomic volatile i32 %0, i32* %2 monotonic, align 4
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %2)
+  store atomic volatile i32 %0, ptr %2 monotonic, align 4
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %2)
   ret void
 }
 
@@ -674,10 +631,9 @@ define void @_Z27atomic_load_relaxed_stk_i64l(i64 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca i64, align 8
-  %3 = bitcast i64* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %3)
-  store atomic volatile i64 %0, i64* %2 monotonic, align 8
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %2)
+  store atomic volatile i64 %0, ptr %2 monotonic, align 8
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %2)
   ret void
 }
 
@@ -689,10 +645,9 @@ define void @_Z27atomic_load_relaxed_stk_u64m(i64 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca i64, align 8
-  %3 = bitcast i64* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %3)
-  store atomic volatile i64 %0, i64* %2 monotonic, align 8
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %2)
+  store atomic volatile i64 %0, ptr %2 monotonic, align 8
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %2)
   ret void
 }
 
@@ -713,14 +668,12 @@ define void @_Z28atomic_load_relaxed_stk_i128n(i128 %0) {
 ; CHECK-NEXT:    or %s11, 0, %s9
   %2 = alloca i128, align 16
   %3 = alloca %"struct.std::__1::atomic.40", align 16
-  %4 = bitcast %"struct.std::__1::atomic.40"* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %4)
-  %5 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %5)
-  store i128 %0, i128* %2, align 16, !tbaa !2
-  call void @__atomic_store(i64 16, i8* nonnull %4, i8* nonnull %5, i32 signext 0)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %5)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %4)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  store i128 %0, ptr %2, align 16, !tbaa !2
+  call void @__atomic_store(i64 16, ptr nonnull %3, ptr nonnull %2, i32 signext 0)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
   ret void
 }
 
@@ -741,14 +694,12 @@ define void @_Z28atomic_load_relaxed_stk_u128o(i128 %0) {
 ; CHECK-NEXT:    or %s11, 0, %s9
   %2 = alloca i128, align 16
   %3 = alloca %"struct.std::__1::atomic.45", align 16
-  %4 = bitcast %"struct.std::__1::atomic.45"* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %4)
-  %5 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %5)
-  store i128 %0, i128* %2, align 16, !tbaa !2
-  call void @__atomic_store(i64 16, i8* nonnull %4, i8* nonnull %5, i32 signext 0)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %5)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %4)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  store i128 %0, ptr %2, align 16, !tbaa !2
+  call void @__atomic_store(i64 16, ptr nonnull %3, ptr nonnull %2, i32 signext 0)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
   ret void
 }
 
@@ -762,7 +713,7 @@ define void @_Z25atomic_load_relaxed_gv_i1b(i1 zeroext %0) {
 ; CHECK-NEXT:    st1b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = zext i1 %0 to i8
-  store atomic i8 %2, i8* getelementptr inbounds (%"struct.std::__1::atomic", %"struct.std::__1::atomic"* @gv_i1, i64 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  store atomic i8 %2, ptr @gv_i1 monotonic, align 4
   ret void
 }
 
@@ -775,7 +726,7 @@ define void @_Z25atomic_load_relaxed_gv_i8c(i8 signext %0) {
 ; CHECK-NEXT:    lea.sl %s1, gv_i8@hi(, %s1)
 ; CHECK-NEXT:    st1b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store atomic i8 %0, i8* getelementptr inbounds (%"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* @gv_i8, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  store atomic i8 %0, ptr @gv_i8 monotonic, align 4
   ret void
 }
 
@@ -788,7 +739,7 @@ define void @_Z25atomic_load_relaxed_gv_u8h(i8 zeroext %0) {
 ; CHECK-NEXT:    lea.sl %s1, gv_u8@hi(, %s1)
 ; CHECK-NEXT:    st1b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store atomic i8 %0, i8* getelementptr inbounds (%"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* @gv_u8, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  store atomic i8 %0, ptr @gv_u8 monotonic, align 4
   ret void
 }
 
@@ -801,7 +752,7 @@ define void @_Z26atomic_load_relaxed_gv_i16s(i16 signext %0) {
 ; CHECK-NEXT:    lea.sl %s1, gv_i16@hi(, %s1)
 ; CHECK-NEXT:    st2b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store atomic i16 %0, i16* getelementptr inbounds (%"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* @gv_i16, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  store atomic i16 %0, ptr @gv_i16 monotonic, align 4
   ret void
 }
 
@@ -814,7 +765,7 @@ define void @_Z26atomic_load_relaxed_gv_u16t(i16 zeroext %0) {
 ; CHECK-NEXT:    lea.sl %s1, gv_u16@hi(, %s1)
 ; CHECK-NEXT:    st2b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store atomic i16 %0, i16* getelementptr inbounds (%"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* @gv_u16, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  store atomic i16 %0, ptr @gv_u16 monotonic, align 4
   ret void
 }
 
@@ -827,7 +778,7 @@ define void @_Z26atomic_load_relaxed_gv_i32i(i32 signext %0) {
 ; CHECK-NEXT:    lea.sl %s1, gv_i32@hi(, %s1)
 ; CHECK-NEXT:    stl %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store atomic i32 %0, i32* getelementptr inbounds (%"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* @gv_i32, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  store atomic i32 %0, ptr @gv_i32 monotonic, align 4
   ret void
 }
 
@@ -840,7 +791,7 @@ define void @_Z26atomic_load_relaxed_gv_u32j(i32 zeroext %0) {
 ; CHECK-NEXT:    lea.sl %s1, gv_u32@hi(, %s1)
 ; CHECK-NEXT:    stl %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store atomic i32 %0, i32* getelementptr inbounds (%"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* @gv_u32, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 4
+  store atomic i32 %0, ptr @gv_u32 monotonic, align 4
   ret void
 }
 
@@ -853,7 +804,7 @@ define void @_Z26atomic_load_relaxed_gv_i64l(i64 %0) {
 ; CHECK-NEXT:    lea.sl %s1, gv_i64@hi(, %s1)
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store atomic i64 %0, i64* getelementptr inbounds (%"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* @gv_i64, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 8
+  store atomic i64 %0, ptr @gv_i64 monotonic, align 8
   ret void
 }
 
@@ -866,7 +817,7 @@ define void @_Z26atomic_load_relaxed_gv_u64m(i64 %0) {
 ; CHECK-NEXT:    lea.sl %s1, gv_u64@hi(, %s1)
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store atomic i64 %0, i64* getelementptr inbounds (%"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* @gv_u64, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0) monotonic, align 8
+  store atomic i64 %0, ptr @gv_u64 monotonic, align 8
   ret void
 }
 
@@ -888,11 +839,10 @@ define void @_Z27atomic_load_relaxed_gv_i128n(i128 %0) {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %2 = alloca i128, align 16
-  %3 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %3)
-  store i128 %0, i128* %2, align 16, !tbaa !2
-  call void @__atomic_store(i64 16, i8* nonnull bitcast (%"struct.std::__1::atomic.40"* @gv_i128 to i8*), i8* nonnull %3, i32 signext 0)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  store i128 %0, ptr %2, align 16, !tbaa !2
+  call void @__atomic_store(i64 16, ptr nonnull @gv_i128, ptr nonnull %2, i32 signext 0)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
   ret void
 }
 
@@ -914,16 +864,15 @@ define void @_Z27atomic_load_relaxed_gv_u128o(i128 %0) {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %2 = alloca i128, align 16
-  %3 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %3)
-  store i128 %0, i128* %2, align 16, !tbaa !2
-  call void @__atomic_store(i64 16, i8* nonnull bitcast (%"struct.std::__1::atomic.45"* @gv_u128 to i8*), i8* nonnull %3, i32 signext 0)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  store i128 %0, ptr %2, align 16, !tbaa !2
+  call void @__atomic_store(i64 16, ptr nonnull @gv_u128, ptr nonnull %2, i32 signext 0)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
   ret void
 }
 
 ; Function Attrs: nofree nounwind willreturn
-declare void @__atomic_store(i64, i8*, i8*, i32)
+declare void @__atomic_store(i64, ptr, ptr, i32)
 
 !2 = !{!3, !3, i64 0}
 !3 = !{!"__int128", !4, i64 0}

diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll b/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll
index fb03fc92e933b..87017db2af112 100644
--- a/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll
+++ b/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll
@@ -77,7 +77,7 @@
 @gv_u128 = global %"struct.std::__1::atomic.45" zeroinitializer, align 16
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i1 @_Z22atomic_swap_relaxed_i1RNSt3__16atomicIbEEb(%"struct.std::__1::atomic"* nocapture nonnull align 1 dereferenceable(1) %0, i1 zeroext %1) {
+define zeroext i1 @_Z22atomic_swap_relaxed_i1RNSt3__16atomicIbEEb(ptr nocapture nonnull align 1 dereferenceable(1) %0, i1 zeroext %1) {
 ; CHECK-LABEL: _Z22atomic_swap_relaxed_i1RNSt3__16atomicIbEEb:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, 3, %s0
@@ -91,15 +91,14 @@ define zeroext i1 @_Z22atomic_swap_relaxed_i1RNSt3__16atomicIbEEb(%"struct.std::
 ; CHECK-NEXT:    and %s0, 1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = zext i1 %1 to i8
-  %4 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %0, i64 0, i32 0, i32 0, i32 0, i32 0
-  %5 = atomicrmw xchg i8* %4, i8 %3 monotonic
-  %6 = and i8 %5, 1
-  %7 = icmp ne i8 %6, 0
-  ret i1 %7
+  %4 = atomicrmw xchg ptr %0, i8 %3 monotonic
+  %5 = and i8 %4, 1
+  %6 = icmp ne i8 %5, 0
+  ret i1 %6
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i8 @_Z22atomic_swap_relaxed_i8RNSt3__16atomicIcEEc(%"struct.std::__1::atomic.0"* nocapture nonnull align 1 dereferenceable(1) %0, i8 signext %1) {
+define signext i8 @_Z22atomic_swap_relaxed_i8RNSt3__16atomicIcEEc(ptr nocapture nonnull align 1 dereferenceable(1) %0, i8 signext %1) {
 ; CHECK-LABEL: _Z22atomic_swap_relaxed_i8RNSt3__16atomicIcEEc:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, 3, %s0
@@ -113,13 +112,12 @@ define signext i8 @_Z22atomic_swap_relaxed_i8RNSt3__16atomicIcEEc(%"struct.std::
 ; CHECK-NEXT:    sll %s0, %s0, 56
 ; CHECK-NEXT:    sra.l %s0, %s0, 56
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i8* %3, i8 %1 monotonic
-  ret i8 %4
+  %3 = atomicrmw xchg ptr %0, i8 %1 monotonic
+  ret i8 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i8 @_Z22atomic_swap_relaxed_u8RNSt3__16atomicIhEEh(%"struct.std::__1::atomic.5"* nocapture nonnull align 1 dereferenceable(1) %0, i8 zeroext %1) {
+define zeroext i8 @_Z22atomic_swap_relaxed_u8RNSt3__16atomicIhEEh(ptr nocapture nonnull align 1 dereferenceable(1) %0, i8 zeroext %1) {
 ; CHECK-LABEL: _Z22atomic_swap_relaxed_u8RNSt3__16atomicIhEEh:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, 3, %s0
@@ -132,13 +130,12 @@ define zeroext i8 @_Z22atomic_swap_relaxed_u8RNSt3__16atomicIhEEh(%"struct.std::
 ; CHECK-NEXT:    srl %s0, %s0, %s3
 ; CHECK-NEXT:    and %s0, %s0, (56)0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i8* %3, i8 %1 monotonic
-  ret i8 %4
+  %3 = atomicrmw xchg ptr %0, i8 %1 monotonic
+  ret i8 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i16 @_Z23atomic_swap_relaxed_i16RNSt3__16atomicIsEEs(%"struct.std::__1::atomic.10"* nocapture nonnull align 2 dereferenceable(2) %0, i16 signext %1) {
+define signext i16 @_Z23atomic_swap_relaxed_i16RNSt3__16atomicIsEEs(ptr nocapture nonnull align 2 dereferenceable(2) %0, i16 signext %1) {
 ; CHECK-LABEL: _Z23atomic_swap_relaxed_i16RNSt3__16atomicIsEEs:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, 3, %s0
@@ -152,13 +149,12 @@ define signext i16 @_Z23atomic_swap_relaxed_i16RNSt3__16atomicIsEEs(%"struct.std
 ; CHECK-NEXT:    sll %s0, %s0, 48
 ; CHECK-NEXT:    sra.l %s0, %s0, 48
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i16* %3, i16 %1 monotonic
-  ret i16 %4
+  %3 = atomicrmw xchg ptr %0, i16 %1 monotonic
+  ret i16 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i16 @_Z23atomic_swap_relaxed_u16RNSt3__16atomicItEEt(%"struct.std::__1::atomic.15"* nocapture nonnull align 2 dereferenceable(2) %0, i16 zeroext %1) {
+define zeroext i16 @_Z23atomic_swap_relaxed_u16RNSt3__16atomicItEEt(ptr nocapture nonnull align 2 dereferenceable(2) %0, i16 zeroext %1) {
 ; CHECK-LABEL: _Z23atomic_swap_relaxed_u16RNSt3__16atomicItEEt:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, 3, %s0
@@ -171,63 +167,58 @@ define zeroext i16 @_Z23atomic_swap_relaxed_u16RNSt3__16atomicItEEt(%"struct.std
 ; CHECK-NEXT:    srl %s0, %s0, %s3
 ; CHECK-NEXT:    and %s0, %s0, (48)0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i16* %3, i16 %1 monotonic
-  ret i16 %4
+  %3 = atomicrmw xchg ptr %0, i16 %1 monotonic
+  ret i16 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i32 @_Z23atomic_swap_relaxed_i32RNSt3__16atomicIiEEi(%"struct.std::__1::atomic.20"* nocapture nonnull align 4 dereferenceable(4) %0, i32 signext %1) {
+define signext i32 @_Z23atomic_swap_relaxed_i32RNSt3__16atomicIiEEi(ptr nocapture nonnull align 4 dereferenceable(4) %0, i32 signext %1) {
 ; CHECK-LABEL: _Z23atomic_swap_relaxed_i32RNSt3__16atomicIiEEi:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ts1am.w %s1, (%s0), 15
 ; CHECK-NEXT:    adds.w.sx %s0, %s1, (0)1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i32* %3, i32 %1 monotonic
-  ret i32 %4
+  %3 = atomicrmw xchg ptr %0, i32 %1 monotonic
+  ret i32 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i32 @_Z23atomic_swap_relaxed_u32RNSt3__16atomicIjEEj(%"struct.std::__1::atomic.25"* nocapture nonnull align 4 dereferenceable(4) %0, i32 zeroext %1) {
+define zeroext i32 @_Z23atomic_swap_relaxed_u32RNSt3__16atomicIjEEj(ptr nocapture nonnull align 4 dereferenceable(4) %0, i32 zeroext %1) {
 ; CHECK-LABEL: _Z23atomic_swap_relaxed_u32RNSt3__16atomicIjEEj:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ts1am.w %s1, (%s0), 15
 ; CHECK-NEXT:    adds.w.zx %s0, %s1, (0)1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i32* %3, i32 %1 monotonic
-  ret i32 %4
+  %3 = atomicrmw xchg ptr %0, i32 %1 monotonic
+  ret i32 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define i64 @_Z23atomic_swap_relaxed_i64RNSt3__16atomicIlEEl(%"struct.std::__1::atomic.30"* nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
+define i64 @_Z23atomic_swap_relaxed_i64RNSt3__16atomicIlEEl(ptr nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
 ; CHECK-LABEL: _Z23atomic_swap_relaxed_i64RNSt3__16atomicIlEEl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 255
 ; CHECK-NEXT:    ts1am.l %s1, (%s0), %s2
 ; CHECK-NEXT:    or %s0, 0, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i64* %3, i64 %1 monotonic
-  ret i64 %4
+  %3 = atomicrmw xchg ptr %0, i64 %1 monotonic
+  ret i64 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define i64 @_Z23atomic_swap_relaxed_u64RNSt3__16atomicImEEm(%"struct.std::__1::atomic.35"* nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
+define i64 @_Z23atomic_swap_relaxed_u64RNSt3__16atomicImEEm(ptr nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
 ; CHECK-LABEL: _Z23atomic_swap_relaxed_u64RNSt3__16atomicImEEm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 255
 ; CHECK-NEXT:    ts1am.l %s1, (%s0), %s2
 ; CHECK-NEXT:    or %s0, 0, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i64* %3, i64 %1 monotonic
-  ret i64 %4
+  %3 = atomicrmw xchg ptr %0, i64 %1 monotonic
+  ret i64 %3
 }
 
 ; Function Attrs: nounwind mustprogress
-define i128 @_Z24atomic_swap_relaxed_i128RNSt3__16atomicInEEn(%"struct.std::__1::atomic.40"* nonnull align 16 dereferenceable(16) %0, i128 %1) {
+define i128 @_Z24atomic_swap_relaxed_i128RNSt3__16atomicInEEn(ptr nonnull align 16 dereferenceable(16) %0, i128 %1) {
 ; CHECK-LABEL: _Z24atomic_swap_relaxed_i128RNSt3__16atomicInEEn:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s5, 0, %s0
@@ -247,21 +238,18 @@ define i128 @_Z24atomic_swap_relaxed_i128RNSt3__16atomicInEEn(%"struct.std::__1:
 ; CHECK-NEXT:    or %s11, 0, %s9
   %3 = alloca i128, align 16
   %4 = alloca i128, align 16
-  %5 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %5)
-  %6 = bitcast i128* %4 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %6)
-  store i128 %1, i128* %3, align 16, !tbaa !2
-  %7 = bitcast %"struct.std::__1::atomic.40"* %0 to i8*
-  call void @__atomic_exchange(i64 16, i8* nonnull %7, i8* nonnull %5, i8* nonnull %6, i32 signext 0)
-  %8 = load i128, i128* %4, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %5)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %6)
-  ret i128 %8
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %4)
+  store i128 %1, ptr %3, align 16, !tbaa !2
+  call void @__atomic_exchange(i64 16, ptr nonnull %0, ptr nonnull %3, ptr nonnull %4, i32 signext 0)
+  %5 = load i128, ptr %4, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %4)
+  ret i128 %5
 }
 
 ; Function Attrs: nounwind mustprogress
-define i128 @_Z24atomic_swap_relaxed_u128RNSt3__16atomicIoEEo(%"struct.std::__1::atomic.45"* nonnull align 16 dereferenceable(16) %0, i128 %1) {
+define i128 @_Z24atomic_swap_relaxed_u128RNSt3__16atomicIoEEo(ptr nonnull align 16 dereferenceable(16) %0, i128 %1) {
 ; CHECK-LABEL: _Z24atomic_swap_relaxed_u128RNSt3__16atomicIoEEo:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s5, 0, %s0
@@ -281,21 +269,18 @@ define i128 @_Z24atomic_swap_relaxed_u128RNSt3__16atomicIoEEo(%"struct.std::__1:
 ; CHECK-NEXT:    or %s11, 0, %s9
   %3 = alloca i128, align 16
   %4 = alloca i128, align 16
-  %5 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %5)
-  %6 = bitcast i128* %4 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %6)
-  store i128 %1, i128* %3, align 16, !tbaa !2
-  %7 = bitcast %"struct.std::__1::atomic.45"* %0 to i8*
-  call void @__atomic_exchange(i64 16, i8* nonnull %7, i8* nonnull %5, i8* nonnull %6, i32 signext 0)
-  %8 = load i128, i128* %4, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %5)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %6)
-  ret i128 %8
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %4)
+  store i128 %1, ptr %3, align 16, !tbaa !2
+  call void @__atomic_exchange(i64 16, ptr nonnull %0, ptr nonnull %3, ptr nonnull %4, i32 signext 0)
+  %5 = load i128, ptr %4, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %4)
+  ret i128 %5
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i1 @_Z22atomic_swap_acquire_i1RNSt3__16atomicIbEEb(%"struct.std::__1::atomic"* nocapture nonnull align 1 dereferenceable(1) %0, i1 zeroext %1) {
+define zeroext i1 @_Z22atomic_swap_acquire_i1RNSt3__16atomicIbEEb(ptr nocapture nonnull align 1 dereferenceable(1) %0, i1 zeroext %1) {
 ; CHECK-LABEL: _Z22atomic_swap_acquire_i1RNSt3__16atomicIbEEb:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, 3, %s0
@@ -310,15 +295,14 @@ define zeroext i1 @_Z22atomic_swap_acquire_i1RNSt3__16atomicIbEEb(%"struct.std::
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = zext i1 %1 to i8
-  %4 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %0, i64 0, i32 0, i32 0, i32 0, i32 0
-  %5 = atomicrmw xchg i8* %4, i8 %3 acquire
-  %6 = and i8 %5, 1
-  %7 = icmp ne i8 %6, 0
-  ret i1 %7
+  %4 = atomicrmw xchg ptr %0, i8 %3 acquire
+  %5 = and i8 %4, 1
+  %6 = icmp ne i8 %5, 0
+  ret i1 %6
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i8 @_Z22atomic_swap_acquire_i8RNSt3__16atomicIcEEc(%"struct.std::__1::atomic.0"* nocapture nonnull align 1 dereferenceable(1) %0, i8 signext %1) {
+define signext i8 @_Z22atomic_swap_acquire_i8RNSt3__16atomicIcEEc(ptr nocapture nonnull align 1 dereferenceable(1) %0, i8 signext %1) {
 ; CHECK-LABEL: _Z22atomic_swap_acquire_i8RNSt3__16atomicIcEEc:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, 3, %s0
@@ -333,13 +317,12 @@ define signext i8 @_Z22atomic_swap_acquire_i8RNSt3__16atomicIcEEc(%"struct.std::
 ; CHECK-NEXT:    sra.l %s0, %s0, 56
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i8* %3, i8 %1 acquire
-  ret i8 %4
+  %3 = atomicrmw xchg ptr %0, i8 %1 acquire
+  ret i8 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i8 @_Z22atomic_swap_acquire_u8RNSt3__16atomicIhEEh(%"struct.std::__1::atomic.5"* nocapture nonnull align 1 dereferenceable(1) %0, i8 zeroext %1) {
+define zeroext i8 @_Z22atomic_swap_acquire_u8RNSt3__16atomicIhEEh(ptr nocapture nonnull align 1 dereferenceable(1) %0, i8 zeroext %1) {
 ; CHECK-LABEL: _Z22atomic_swap_acquire_u8RNSt3__16atomicIhEEh:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, 3, %s0
@@ -353,13 +336,12 @@ define zeroext i8 @_Z22atomic_swap_acquire_u8RNSt3__16atomicIhEEh(%"struct.std::
 ; CHECK-NEXT:    and %s0, %s0, (56)0
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i8* %3, i8 %1 acquire
-  ret i8 %4
+  %3 = atomicrmw xchg ptr %0, i8 %1 acquire
+  ret i8 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i16 @_Z23atomic_swap_acquire_i16RNSt3__16atomicIsEEs(%"struct.std::__1::atomic.10"* nocapture nonnull align 2 dereferenceable(2) %0, i16 signext %1) {
+define signext i16 @_Z23atomic_swap_acquire_i16RNSt3__16atomicIsEEs(ptr nocapture nonnull align 2 dereferenceable(2) %0, i16 signext %1) {
 ; CHECK-LABEL: _Z23atomic_swap_acquire_i16RNSt3__16atomicIsEEs:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, 3, %s0
@@ -374,13 +356,12 @@ define signext i16 @_Z23atomic_swap_acquire_i16RNSt3__16atomicIsEEs(%"struct.std
 ; CHECK-NEXT:    sra.l %s0, %s0, 48
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i16* %3, i16 %1 acquire
-  ret i16 %4
+  %3 = atomicrmw xchg ptr %0, i16 %1 acquire
+  ret i16 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i16 @_Z23atomic_swap_acquire_u16RNSt3__16atomicItEEt(%"struct.std::__1::atomic.15"* nocapture nonnull align 2 dereferenceable(2) %0, i16 zeroext %1) {
+define zeroext i16 @_Z23atomic_swap_acquire_u16RNSt3__16atomicItEEt(ptr nocapture nonnull align 2 dereferenceable(2) %0, i16 zeroext %1) {
 ; CHECK-LABEL: _Z23atomic_swap_acquire_u16RNSt3__16atomicItEEt:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, 3, %s0
@@ -394,39 +375,36 @@ define zeroext i16 @_Z23atomic_swap_acquire_u16RNSt3__16atomicItEEt(%"struct.std
 ; CHECK-NEXT:    and %s0, %s0, (48)0
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i16* %3, i16 %1 acquire
-  ret i16 %4
+  %3 = atomicrmw xchg ptr %0, i16 %1 acquire
+  ret i16 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i32 @_Z23atomic_swap_acquire_i32RNSt3__16atomicIiEEi(%"struct.std::__1::atomic.20"* nocapture nonnull align 4 dereferenceable(4) %0, i32 signext %1) {
+define signext i32 @_Z23atomic_swap_acquire_i32RNSt3__16atomicIiEEi(ptr nocapture nonnull align 4 dereferenceable(4) %0, i32 signext %1) {
 ; CHECK-LABEL: _Z23atomic_swap_acquire_i32RNSt3__16atomicIiEEi:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ts1am.w %s1, (%s0), 15
 ; CHECK-NEXT:    adds.w.sx %s0, %s1, (0)1
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i32* %3, i32 %1 acquire
-  ret i32 %4
+  %3 = atomicrmw xchg ptr %0, i32 %1 acquire
+  ret i32 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i32 @_Z23atomic_swap_acquire_u32RNSt3__16atomicIjEEj(%"struct.std::__1::atomic.25"* nocapture nonnull align 4 dereferenceable(4) %0, i32 zeroext %1) {
+define zeroext i32 @_Z23atomic_swap_acquire_u32RNSt3__16atomicIjEEj(ptr nocapture nonnull align 4 dereferenceable(4) %0, i32 zeroext %1) {
 ; CHECK-LABEL: _Z23atomic_swap_acquire_u32RNSt3__16atomicIjEEj:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ts1am.w %s1, (%s0), 15
 ; CHECK-NEXT:    adds.w.zx %s0, %s1, (0)1
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i32* %3, i32 %1 acquire
-  ret i32 %4
+  %3 = atomicrmw xchg ptr %0, i32 %1 acquire
+  ret i32 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define i64 @_Z23atomic_swap_acquire_i64RNSt3__16atomicIlEEl(%"struct.std::__1::atomic.30"* nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
+define i64 @_Z23atomic_swap_acquire_i64RNSt3__16atomicIlEEl(ptr nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
 ; CHECK-LABEL: _Z23atomic_swap_acquire_i64RNSt3__16atomicIlEEl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 255
@@ -434,13 +412,12 @@ define i64 @_Z23atomic_swap_acquire_i64RNSt3__16atomicIlEEl(%"struct.std::__1::a
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    or %s0, 0, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i64* %3, i64 %1 acquire
-  ret i64 %4
+  %3 = atomicrmw xchg ptr %0, i64 %1 acquire
+  ret i64 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define i64 @_Z23atomic_swap_acquire_u64RNSt3__16atomicImEEm(%"struct.std::__1::atomic.35"* nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
+define i64 @_Z23atomic_swap_acquire_u64RNSt3__16atomicImEEm(ptr nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
 ; CHECK-LABEL: _Z23atomic_swap_acquire_u64RNSt3__16atomicImEEm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 255
@@ -448,13 +425,12 @@ define i64 @_Z23atomic_swap_acquire_u64RNSt3__16atomicImEEm(%"struct.std::__1::a
 ; CHECK-NEXT:    fencem 2
 ; CHECK-NEXT:    or %s0, 0, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i64* %3, i64 %1 acquire
-  ret i64 %4
+  %3 = atomicrmw xchg ptr %0, i64 %1 acquire
+  ret i64 %3
 }
 
 ; Function Attrs: nounwind mustprogress
-define i128 @_Z24atomic_swap_acquire_i128RNSt3__16atomicInEEn(%"struct.std::__1::atomic.40"* nonnull align 16 dereferenceable(16) %0, i128 %1) {
+define i128 @_Z24atomic_swap_acquire_i128RNSt3__16atomicInEEn(ptr nonnull align 16 dereferenceable(16) %0, i128 %1) {
 ; CHECK-LABEL: _Z24atomic_swap_acquire_i128RNSt3__16atomicInEEn:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s5, 0, %s0
@@ -474,21 +450,18 @@ define i128 @_Z24atomic_swap_acquire_i128RNSt3__16atomicInEEn(%"struct.std::__1:
 ; CHECK-NEXT:    or %s11, 0, %s9
   %3 = alloca i128, align 16
   %4 = alloca i128, align 16
-  %5 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %5)
-  %6 = bitcast i128* %4 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %6)
-  store i128 %1, i128* %3, align 16, !tbaa !2
-  %7 = bitcast %"struct.std::__1::atomic.40"* %0 to i8*
-  call void @__atomic_exchange(i64 16, i8* nonnull %7, i8* nonnull %5, i8* nonnull %6, i32 signext 2)
-  %8 = load i128, i128* %4, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %5)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %6)
-  ret i128 %8
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %4)
+  store i128 %1, ptr %3, align 16, !tbaa !2
+  call void @__atomic_exchange(i64 16, ptr nonnull %0, ptr nonnull %3, ptr nonnull %4, i32 signext 2)
+  %5 = load i128, ptr %4, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %4)
+  ret i128 %5
 }
 
 ; Function Attrs: nounwind mustprogress
-define i128 @_Z24atomic_swap_acquire_u128RNSt3__16atomicIoEEo(%"struct.std::__1::atomic.45"* nonnull align 16 dereferenceable(16) %0, i128 %1) {
+define i128 @_Z24atomic_swap_acquire_u128RNSt3__16atomicIoEEo(ptr nonnull align 16 dereferenceable(16) %0, i128 %1) {
 ; CHECK-LABEL: _Z24atomic_swap_acquire_u128RNSt3__16atomicIoEEo:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s5, 0, %s0
@@ -508,21 +481,18 @@ define i128 @_Z24atomic_swap_acquire_u128RNSt3__16atomicIoEEo(%"struct.std::__1:
 ; CHECK-NEXT:    or %s11, 0, %s9
   %3 = alloca i128, align 16
   %4 = alloca i128, align 16
-  %5 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %5)
-  %6 = bitcast i128* %4 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %6)
-  store i128 %1, i128* %3, align 16, !tbaa !2
-  %7 = bitcast %"struct.std::__1::atomic.45"* %0 to i8*
-  call void @__atomic_exchange(i64 16, i8* nonnull %7, i8* nonnull %5, i8* nonnull %6, i32 signext 2)
-  %8 = load i128, i128* %4, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %5)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %6)
-  ret i128 %8
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %4)
+  store i128 %1, ptr %3, align 16, !tbaa !2
+  call void @__atomic_exchange(i64 16, ptr nonnull %0, ptr nonnull %3, ptr nonnull %4, i32 signext 2)
+  %5 = load i128, ptr %4, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %4)
+  ret i128 %5
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i1 @_Z22atomic_swap_seq_cst_i1RNSt3__16atomicIbEEb(%"struct.std::__1::atomic"* nocapture nonnull align 1 dereferenceable(1) %0, i1 zeroext %1) {
+define zeroext i1 @_Z22atomic_swap_seq_cst_i1RNSt3__16atomicIbEEb(ptr nocapture nonnull align 1 dereferenceable(1) %0, i1 zeroext %1) {
 ; CHECK-LABEL: _Z22atomic_swap_seq_cst_i1RNSt3__16atomicIbEEb:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
@@ -538,15 +508,14 @@ define zeroext i1 @_Z22atomic_swap_seq_cst_i1RNSt3__16atomicIbEEb(%"struct.std::
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = zext i1 %1 to i8
-  %4 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %0, i64 0, i32 0, i32 0, i32 0, i32 0
-  %5 = atomicrmw xchg i8* %4, i8 %3 seq_cst
-  %6 = and i8 %5, 1
-  %7 = icmp ne i8 %6, 0
-  ret i1 %7
+  %4 = atomicrmw xchg ptr %0, i8 %3 seq_cst
+  %5 = and i8 %4, 1
+  %6 = icmp ne i8 %5, 0
+  ret i1 %6
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i8 @_Z22atomic_swap_seq_cst_i8RNSt3__16atomicIcEEc(%"struct.std::__1::atomic.0"* nocapture nonnull align 1 dereferenceable(1) %0, i8 signext %1) {
+define signext i8 @_Z22atomic_swap_seq_cst_i8RNSt3__16atomicIcEEc(ptr nocapture nonnull align 1 dereferenceable(1) %0, i8 signext %1) {
 ; CHECK-LABEL: _Z22atomic_swap_seq_cst_i8RNSt3__16atomicIcEEc:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
@@ -562,13 +531,12 @@ define signext i8 @_Z22atomic_swap_seq_cst_i8RNSt3__16atomicIcEEc(%"struct.std::
 ; CHECK-NEXT:    sra.l %s0, %s0, 56
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i8* %3, i8 %1 seq_cst
-  ret i8 %4
+  %3 = atomicrmw xchg ptr %0, i8 %1 seq_cst
+  ret i8 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i8 @_Z22atomic_swap_seq_cst_u8RNSt3__16atomicIhEEh(%"struct.std::__1::atomic.5"* nocapture nonnull align 1 dereferenceable(1) %0, i8 zeroext %1) {
+define zeroext i8 @_Z22atomic_swap_seq_cst_u8RNSt3__16atomicIhEEh(ptr nocapture nonnull align 1 dereferenceable(1) %0, i8 zeroext %1) {
 ; CHECK-LABEL: _Z22atomic_swap_seq_cst_u8RNSt3__16atomicIhEEh:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
@@ -583,13 +551,12 @@ define zeroext i8 @_Z22atomic_swap_seq_cst_u8RNSt3__16atomicIhEEh(%"struct.std::
 ; CHECK-NEXT:    and %s0, %s0, (56)0
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i8* %3, i8 %1 seq_cst
-  ret i8 %4
+  %3 = atomicrmw xchg ptr %0, i8 %1 seq_cst
+  ret i8 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i16 @_Z23atomic_swap_seq_cst_i16RNSt3__16atomicIsEEs(%"struct.std::__1::atomic.10"* nocapture nonnull align 2 dereferenceable(2) %0, i16 signext %1) {
+define signext i16 @_Z23atomic_swap_seq_cst_i16RNSt3__16atomicIsEEs(ptr nocapture nonnull align 2 dereferenceable(2) %0, i16 signext %1) {
 ; CHECK-LABEL: _Z23atomic_swap_seq_cst_i16RNSt3__16atomicIsEEs:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
@@ -605,13 +572,12 @@ define signext i16 @_Z23atomic_swap_seq_cst_i16RNSt3__16atomicIsEEs(%"struct.std
 ; CHECK-NEXT:    sra.l %s0, %s0, 48
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i16* %3, i16 %1 seq_cst
-  ret i16 %4
+  %3 = atomicrmw xchg ptr %0, i16 %1 seq_cst
+  ret i16 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i16 @_Z23atomic_swap_seq_cst_u16RNSt3__16atomicItEEt(%"struct.std::__1::atomic.15"* nocapture nonnull align 2 dereferenceable(2) %0, i16 zeroext %1) {
+define zeroext i16 @_Z23atomic_swap_seq_cst_u16RNSt3__16atomicItEEt(ptr nocapture nonnull align 2 dereferenceable(2) %0, i16 zeroext %1) {
 ; CHECK-LABEL: _Z23atomic_swap_seq_cst_u16RNSt3__16atomicItEEt:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
@@ -626,13 +592,12 @@ define zeroext i16 @_Z23atomic_swap_seq_cst_u16RNSt3__16atomicItEEt(%"struct.std
 ; CHECK-NEXT:    and %s0, %s0, (48)0
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i16* %3, i16 %1 seq_cst
-  ret i16 %4
+  %3 = atomicrmw xchg ptr %0, i16 %1 seq_cst
+  ret i16 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define signext i32 @_Z23atomic_swap_seq_cst_i32RNSt3__16atomicIiEEi(%"struct.std::__1::atomic.20"* nocapture nonnull align 4 dereferenceable(4) %0, i32 signext %1) {
+define signext i32 @_Z23atomic_swap_seq_cst_i32RNSt3__16atomicIiEEi(ptr nocapture nonnull align 4 dereferenceable(4) %0, i32 signext %1) {
 ; CHECK-LABEL: _Z23atomic_swap_seq_cst_i32RNSt3__16atomicIiEEi:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
@@ -640,13 +605,12 @@ define signext i32 @_Z23atomic_swap_seq_cst_i32RNSt3__16atomicIiEEi(%"struct.std
 ; CHECK-NEXT:    adds.w.sx %s0, %s1, (0)1
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i32* %3, i32 %1 seq_cst
-  ret i32 %4
+  %3 = atomicrmw xchg ptr %0, i32 %1 seq_cst
+  ret i32 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define zeroext i32 @_Z23atomic_swap_seq_cst_u32RNSt3__16atomicIjEEj(%"struct.std::__1::atomic.25"* nocapture nonnull align 4 dereferenceable(4) %0, i32 zeroext %1) {
+define zeroext i32 @_Z23atomic_swap_seq_cst_u32RNSt3__16atomicIjEEj(ptr nocapture nonnull align 4 dereferenceable(4) %0, i32 zeroext %1) {
 ; CHECK-LABEL: _Z23atomic_swap_seq_cst_u32RNSt3__16atomicIjEEj:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
@@ -654,13 +618,12 @@ define zeroext i32 @_Z23atomic_swap_seq_cst_u32RNSt3__16atomicIjEEj(%"struct.std
 ; CHECK-NEXT:    adds.w.zx %s0, %s1, (0)1
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i32* %3, i32 %1 seq_cst
-  ret i32 %4
+  %3 = atomicrmw xchg ptr %0, i32 %1 seq_cst
+  ret i32 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define i64 @_Z23atomic_swap_seq_cst_i64RNSt3__16atomicIlEEl(%"struct.std::__1::atomic.30"* nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
+define i64 @_Z23atomic_swap_seq_cst_i64RNSt3__16atomicIlEEl(ptr nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
 ; CHECK-LABEL: _Z23atomic_swap_seq_cst_i64RNSt3__16atomicIlEEl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
@@ -669,13 +632,12 @@ define i64 @_Z23atomic_swap_seq_cst_i64RNSt3__16atomicIlEEl(%"struct.std::__1::a
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    or %s0, 0, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i64* %3, i64 %1 seq_cst
-  ret i64 %4
+  %3 = atomicrmw xchg ptr %0, i64 %1 seq_cst
+  ret i64 %3
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
-define i64 @_Z23atomic_swap_seq_cst_u64RNSt3__16atomicImEEm(%"struct.std::__1::atomic.35"* nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
+define i64 @_Z23atomic_swap_seq_cst_u64RNSt3__16atomicImEEm(ptr nocapture nonnull align 8 dereferenceable(8) %0, i64 %1) {
 ; CHECK-LABEL: _Z23atomic_swap_seq_cst_u64RNSt3__16atomicImEEm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fencem 3
@@ -684,13 +646,12 @@ define i64 @_Z23atomic_swap_seq_cst_u64RNSt3__16atomicImEEm(%"struct.std::__1::a
 ; CHECK-NEXT:    fencem 3
 ; CHECK-NEXT:    or %s0, 0, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %4 = atomicrmw xchg i64* %3, i64 %1 seq_cst
-  ret i64 %4
+  %3 = atomicrmw xchg ptr %0, i64 %1 seq_cst
+  ret i64 %3
 }
 
 ; Function Attrs: nounwind mustprogress
-define i128 @_Z24atomic_swap_seq_cst_i128RNSt3__16atomicInEEn(%"struct.std::__1::atomic.40"* nonnull align 16 dereferenceable(16) %0, i128 %1) {
+define i128 @_Z24atomic_swap_seq_cst_i128RNSt3__16atomicInEEn(ptr nonnull align 16 dereferenceable(16) %0, i128 %1) {
 ; CHECK-LABEL: _Z24atomic_swap_seq_cst_i128RNSt3__16atomicInEEn:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s5, 0, %s0
@@ -710,21 +671,18 @@ define i128 @_Z24atomic_swap_seq_cst_i128RNSt3__16atomicInEEn(%"struct.std::__1:
 ; CHECK-NEXT:    or %s11, 0, %s9
   %3 = alloca i128, align 16
   %4 = alloca i128, align 16
-  %5 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %5)
-  %6 = bitcast i128* %4 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %6)
-  store i128 %1, i128* %3, align 16, !tbaa !2
-  %7 = bitcast %"struct.std::__1::atomic.40"* %0 to i8*
-  call void @__atomic_exchange(i64 16, i8* nonnull %7, i8* nonnull %5, i8* nonnull %6, i32 signext 5)
-  %8 = load i128, i128* %4, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %5)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %6)
-  ret i128 %8
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %4)
+  store i128 %1, ptr %3, align 16, !tbaa !2
+  call void @__atomic_exchange(i64 16, ptr nonnull %0, ptr nonnull %3, ptr nonnull %4, i32 signext 5)
+  %5 = load i128, ptr %4, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %4)
+  ret i128 %5
 }
 
 ; Function Attrs: nounwind mustprogress
-define i128 @_Z24atomic_swap_seq_cst_u128RNSt3__16atomicIoEEo(%"struct.std::__1::atomic.45"* nonnull align 16 dereferenceable(16) %0, i128 %1) {
+define i128 @_Z24atomic_swap_seq_cst_u128RNSt3__16atomicIoEEo(ptr nonnull align 16 dereferenceable(16) %0, i128 %1) {
 ; CHECK-LABEL: _Z24atomic_swap_seq_cst_u128RNSt3__16atomicIoEEo:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s5, 0, %s0
@@ -744,17 +702,14 @@ define i128 @_Z24atomic_swap_seq_cst_u128RNSt3__16atomicIoEEo(%"struct.std::__1:
 ; CHECK-NEXT:    or %s11, 0, %s9
   %3 = alloca i128, align 16
   %4 = alloca i128, align 16
-  %5 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %5)
-  %6 = bitcast i128* %4 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %6)
-  store i128 %1, i128* %3, align 16, !tbaa !2
-  %7 = bitcast %"struct.std::__1::atomic.45"* %0 to i8*
-  call void @__atomic_exchange(i64 16, i8* nonnull %7, i8* nonnull %5, i8* nonnull %6, i32 signext 5)
-  %8 = load i128, i128* %4, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %5)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %6)
-  ret i128 %8
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %4)
+  store i128 %1, ptr %3, align 16, !tbaa !2
+  call void @__atomic_exchange(i64 16, ptr nonnull %0, ptr nonnull %3, ptr nonnull %4, i32 signext 5)
+  %5 = load i128, ptr %4, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %4)
+  ret i128 %5
 }
 
 ; Function Attrs: nofree nounwind mustprogress
@@ -769,21 +724,20 @@ define zeroext i1 @_Z26atomic_swap_relaxed_stk_i1b(i1 zeroext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca %"struct.std::__1::atomic", align 1
-  %3 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %2, i64 0, i32 0, i32 0, i32 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %3)
-  %4 = zext i1 %0 to i8
-  %5 = atomicrmw volatile xchg i8* %3, i8 %4 monotonic
-  %6 = and i8 %5, 1
-  %7 = icmp ne i8 %6, 0
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %3)
-  ret i1 %7
+  call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %2)
+  %3 = zext i1 %0 to i8
+  %4 = atomicrmw volatile xchg ptr %2, i8 %3 monotonic
+  %5 = and i8 %4, 1
+  %6 = icmp ne i8 %5, 0
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %2)
+  ret i1 %6
 }
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 ; Function Attrs: nofree nounwind mustprogress
 define signext i8 @_Z26atomic_swap_relaxed_stk_i8c(i8 signext %0) {
@@ -798,11 +752,10 @@ define signext i8 @_Z26atomic_swap_relaxed_stk_i8c(i8 signext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca %"struct.std::__1::atomic.0", align 1
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %3)
-  %4 = atomicrmw volatile xchg i8* %3, i8 %0 monotonic
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %3)
-  ret i8 %4
+  call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %2)
+  %3 = atomicrmw volatile xchg ptr %2, i8 %0 monotonic
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %2)
+  ret i8 %3
 }
 
 ; Function Attrs: nofree nounwind mustprogress
@@ -817,11 +770,10 @@ define zeroext i8 @_Z26atomic_swap_relaxed_stk_u8h(i8 zeroext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca %"struct.std::__1::atomic.5", align 1
-  %3 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %3)
-  %4 = atomicrmw volatile xchg i8* %3, i8 %0 monotonic
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %3)
-  ret i8 %4
+  call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %2)
+  %3 = atomicrmw volatile xchg ptr %2, i8 %0 monotonic
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %2)
+  ret i8 %3
 }
 
 ; Function Attrs: nofree nounwind mustprogress
@@ -837,12 +789,10 @@ define signext i16 @_Z27atomic_swap_relaxed_stk_i16s(i16 signext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca %"struct.std::__1::atomic.10", align 2
-  %3 = bitcast %"struct.std::__1::atomic.10"* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %3)
-  %4 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %5 = atomicrmw volatile xchg i16* %4, i16 %0 monotonic
-  call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull %3)
-  ret i16 %5
+  call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %2)
+  %3 = atomicrmw volatile xchg ptr %2, i16 %0 monotonic
+  call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %2)
+  ret i16 %3
 }
 
 ; Function Attrs: nofree nounwind mustprogress
@@ -857,12 +807,10 @@ define zeroext i16 @_Z27atomic_swap_relaxed_stk_u16t(i16 zeroext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca %"struct.std::__1::atomic.15", align 2
-  %3 = bitcast %"struct.std::__1::atomic.15"* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %3)
-  %4 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %5 = atomicrmw volatile xchg i16* %4, i16 %0 monotonic
-  call void @llvm.lifetime.end.p0i8(i64 2, i8* nonnull %3)
-  ret i16 %5
+  call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %2)
+  %3 = atomicrmw volatile xchg ptr %2, i16 %0 monotonic
+  call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %2)
+  ret i16 %3
 }
 
 ; Function Attrs: nofree nounwind mustprogress
@@ -874,12 +822,10 @@ define signext i32 @_Z27atomic_swap_relaxed_stk_i32i(i32 signext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca %"struct.std::__1::atomic.20", align 4
-  %3 = bitcast %"struct.std::__1::atomic.20"* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %3)
-  %4 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %5 = atomicrmw volatile xchg i32* %4, i32 %0 monotonic
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %3)
-  ret i32 %5
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %2)
+  %3 = atomicrmw volatile xchg ptr %2, i32 %0 monotonic
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %2)
+  ret i32 %3
 }
 
 ; Function Attrs: nofree nounwind mustprogress
@@ -891,12 +837,10 @@ define zeroext i32 @_Z27atomic_swap_relaxed_stk_u32j(i32 zeroext %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca %"struct.std::__1::atomic.25", align 4
-  %3 = bitcast %"struct.std::__1::atomic.25"* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %3)
-  %4 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %5 = atomicrmw volatile xchg i32* %4, i32 %0 monotonic
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %3)
-  ret i32 %5
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %2)
+  %3 = atomicrmw volatile xchg ptr %2, i32 %0 monotonic
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %2)
+  ret i32 %3
 }
 
 ; Function Attrs: nofree nounwind mustprogress
@@ -908,12 +852,10 @@ define i64 @_Z27atomic_swap_relaxed_stk_i64l(i64 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca %"struct.std::__1::atomic.30", align 8
-  %3 = bitcast %"struct.std::__1::atomic.30"* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %3)
-  %4 = getelementptr inbounds %"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %5 = atomicrmw volatile xchg i64* %4, i64 %0 monotonic
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %3)
-  ret i64 %5
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %2)
+  %3 = atomicrmw volatile xchg ptr %2, i64 %0 monotonic
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %2)
+  ret i64 %3
 }
 
 ; Function Attrs: nofree nounwind mustprogress
@@ -925,12 +867,10 @@ define i64 @_Z27atomic_swap_relaxed_stk_u64m(i64 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca %"struct.std::__1::atomic.35", align 8
-  %3 = bitcast %"struct.std::__1::atomic.35"* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %3)
-  %4 = getelementptr inbounds %"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
-  %5 = atomicrmw volatile xchg i64* %4, i64 %0 monotonic
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %3)
-  ret i64 %5
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %2)
+  %3 = atomicrmw volatile xchg ptr %2, i64 %0 monotonic
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %2)
+  ret i64 %3
 }
 
 ; Function Attrs: nounwind mustprogress
@@ -954,19 +894,16 @@ define i128 @_Z28atomic_swap_relaxed_stk_i128n(i128 %0) {
   %2 = alloca i128, align 16
   %3 = alloca i128, align 16
   %4 = alloca %"struct.std::__1::atomic.40", align 16
-  %5 = bitcast %"struct.std::__1::atomic.40"* %4 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %5)
-  %6 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %6)
-  %7 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %7)
-  store i128 %0, i128* %2, align 16, !tbaa !2
-  call void @__atomic_exchange(i64 16, i8* nonnull %5, i8* nonnull %6, i8* nonnull %7, i32 signext 0)
-  %8 = load i128, i128* %3, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %6)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %7)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %5)
-  ret i128 %8
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %4)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  store i128 %0, ptr %2, align 16, !tbaa !2
+  call void @__atomic_exchange(i64 16, ptr nonnull %4, ptr nonnull %2, ptr nonnull %3, i32 signext 0)
+  %5 = load i128, ptr %3, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %4)
+  ret i128 %5
 }
 
 ; Function Attrs: nounwind mustprogress
@@ -990,19 +927,16 @@ define i128 @_Z28atomic_swap_relaxed_stk_u128o(i128 %0) {
   %2 = alloca i128, align 16
   %3 = alloca i128, align 16
   %4 = alloca %"struct.std::__1::atomic.45", align 16
-  %5 = bitcast %"struct.std::__1::atomic.45"* %4 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %5)
-  %6 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %6)
-  %7 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %7)
-  store i128 %0, i128* %2, align 16, !tbaa !2
-  call void @__atomic_exchange(i64 16, i8* nonnull %5, i8* nonnull %6, i8* nonnull %7, i32 signext 0)
-  %8 = load i128, i128* %3, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %6)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %7)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %5)
-  ret i128 %8
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %4)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  store i128 %0, ptr %2, align 16, !tbaa !2
+  call void @__atomic_exchange(i64 16, ptr nonnull %4, ptr nonnull %2, ptr nonnull %3, i32 signext 0)
+  %5 = load i128, ptr %3, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %4)
+  ret i128 %5
 }
 
 ; Function Attrs: nofree norecurse nounwind mustprogress
@@ -1023,7 +957,7 @@ define zeroext i1 @_Z25atomic_swap_relaxed_gv_i1b(i1 zeroext %0) {
 ; CHECK-NEXT:    and %s0, 1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = zext i1 %0 to i8
-  %3 = atomicrmw xchg i8* getelementptr inbounds (%"struct.std::__1::atomic", %"struct.std::__1::atomic"* @gv_i1, i64 0, i32 0, i32 0, i32 0, i32 0), i8 %2 monotonic
+  %3 = atomicrmw xchg ptr @gv_i1, i8 %2 monotonic
   %4 = and i8 %3, 1
   %5 = icmp ne i8 %4, 0
   ret i1 %5
@@ -1047,7 +981,7 @@ define signext i8 @_Z25atomic_swap_relaxed_gv_i8c(i8 signext %0) {
 ; CHECK-NEXT:    sll %s0, %s0, 56
 ; CHECK-NEXT:    sra.l %s0, %s0, 56
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = atomicrmw xchg i8* getelementptr inbounds (%"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* @gv_i8, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0), i8 %0 monotonic
+  %2 = atomicrmw xchg ptr @gv_i8, i8 %0 monotonic
   ret i8 %2
 }
 
@@ -1068,7 +1002,7 @@ define zeroext i8 @_Z25atomic_swap_relaxed_gv_u8h(i8 zeroext %0) {
 ; CHECK-NEXT:    srl %s0, %s0, %s3
 ; CHECK-NEXT:    and %s0, %s0, (56)0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = atomicrmw xchg i8* getelementptr inbounds (%"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* @gv_u8, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0), i8 %0 monotonic
+  %2 = atomicrmw xchg ptr @gv_u8, i8 %0 monotonic
   ret i8 %2
 }
 
@@ -1090,7 +1024,7 @@ define signext i16 @_Z26atomic_swap_relaxed_gv_i16s(i16 signext %0) {
 ; CHECK-NEXT:    sll %s0, %s0, 48
 ; CHECK-NEXT:    sra.l %s0, %s0, 48
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = atomicrmw xchg i16* getelementptr inbounds (%"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* @gv_i16, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0), i16 %0 monotonic
+  %2 = atomicrmw xchg ptr @gv_i16, i16 %0 monotonic
   ret i16 %2
 }
 
@@ -1111,7 +1045,7 @@ define zeroext i16 @_Z26atomic_swap_relaxed_gv_u16t(i16 zeroext %0) {
 ; CHECK-NEXT:    srl %s0, %s0, %s3
 ; CHECK-NEXT:    and %s0, %s0, (48)0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = atomicrmw xchg i16* getelementptr inbounds (%"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* @gv_u16, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0), i16 %0 monotonic
+  %2 = atomicrmw xchg ptr @gv_u16, i16 %0 monotonic
   ret i16 %2
 }
 
@@ -1125,7 +1059,7 @@ define signext i32 @_Z26atomic_swap_relaxed_gv_i32i(i32 signext %0) {
 ; CHECK-NEXT:    ts1am.w %s0, (%s1), 15
 ; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = atomicrmw xchg i32* getelementptr inbounds (%"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* @gv_i32, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0), i32 %0 monotonic
+  %2 = atomicrmw xchg ptr @gv_i32, i32 %0 monotonic
   ret i32 %2
 }
 
@@ -1139,7 +1073,7 @@ define zeroext i32 @_Z26atomic_swap_relaxed_gv_u32j(i32 zeroext %0) {
 ; CHECK-NEXT:    ts1am.w %s0, (%s1), 15
 ; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = atomicrmw xchg i32* getelementptr inbounds (%"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* @gv_u32, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0), i32 %0 monotonic
+  %2 = atomicrmw xchg ptr @gv_u32, i32 %0 monotonic
   ret i32 %2
 }
 
@@ -1153,7 +1087,7 @@ define i64 @_Z26atomic_swap_relaxed_gv_i64l(i64 %0) {
 ; CHECK-NEXT:    lea %s2, 255
 ; CHECK-NEXT:    ts1am.l %s0, (%s1), %s2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = atomicrmw xchg i64* getelementptr inbounds (%"struct.std::__1::atomic.30", %"struct.std::__1::atomic.30"* @gv_i64, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0), i64 %0 monotonic
+  %2 = atomicrmw xchg ptr @gv_i64, i64 %0 monotonic
   ret i64 %2
 }
 
@@ -1167,7 +1101,7 @@ define i64 @_Z26atomic_swap_relaxed_gv_u64m(i64 %0) {
 ; CHECK-NEXT:    lea %s2, 255
 ; CHECK-NEXT:    ts1am.l %s0, (%s1), %s2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = atomicrmw xchg i64* getelementptr inbounds (%"struct.std::__1::atomic.35", %"struct.std::__1::atomic.35"* @gv_u64, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0), i64 %0 monotonic
+  %2 = atomicrmw xchg ptr @gv_u64, i64 %0 monotonic
   ret i64 %2
 }
 
@@ -1193,16 +1127,14 @@ define i128 @_Z27atomic_swap_relaxed_gv_i128n(i128 %0) {
 ; CHECK-NEXT:    or %s11, 0, %s9
   %2 = alloca i128, align 16
   %3 = alloca i128, align 16
-  %4 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %4)
-  %5 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %5)
-  store i128 %0, i128* %2, align 16, !tbaa !2
-  call void @__atomic_exchange(i64 16, i8* nonnull bitcast (%"struct.std::__1::atomic.40"* @gv_i128 to i8*), i8* nonnull %4, i8* nonnull %5, i32 signext 0)
-  %6 = load i128, i128* %3, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %4)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %5)
-  ret i128 %6
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  store i128 %0, ptr %2, align 16, !tbaa !2
+  call void @__atomic_exchange(i64 16, ptr nonnull @gv_i128, ptr nonnull %2, ptr nonnull %3, i32 signext 0)
+  %4 = load i128, ptr %3, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
+  ret i128 %4
 }
 
 ; Function Attrs: nounwind mustprogress
@@ -1227,20 +1159,18 @@ define i128 @_Z27atomic_swap_relaxed_gv_u128o(i128 %0) {
 ; CHECK-NEXT:    or %s11, 0, %s9
   %2 = alloca i128, align 16
   %3 = alloca i128, align 16
-  %4 = bitcast i128* %2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %4)
-  %5 = bitcast i128* %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %5)
-  store i128 %0, i128* %2, align 16, !tbaa !2
-  call void @__atomic_exchange(i64 16, i8* nonnull bitcast (%"struct.std::__1::atomic.45"* @gv_u128 to i8*), i8* nonnull %4, i8* nonnull %5, i32 signext 0)
-  %6 = load i128, i128* %3, align 16, !tbaa !2
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %4)
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %5)
-  ret i128 %6
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2)
+  call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %3)
+  store i128 %0, ptr %2, align 16, !tbaa !2
+  call void @__atomic_exchange(i64 16, ptr nonnull @gv_u128, ptr nonnull %2, ptr nonnull %3, i32 signext 0)
+  %4 = load i128, ptr %3, align 16, !tbaa !2
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %2)
+  call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %3)
+  ret i128 %4
 }
 
 ; Function Attrs: nounwind willreturn
-declare void @__atomic_exchange(i64, i8*, i8*, i8*, i32)
+declare void @__atomic_exchange(i64, ptr, ptr, ptr, i32)
 
 !2 = !{!3, !3, i64 0}
 !3 = !{!"__int128", !4, i64 0}
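
The conversion above is mechanical, and the same shape repeats in the files that follow: under opaque pointers the i8* bitcasts and the zero-offset getelementptrs that only select the first field of the std::atomic wrapper become no-ops, so atomicrmw and __atomic_exchange operate on the incoming ptr argument directly, the lifetime intrinsics take the allocas directly, and the remaining SSA values are renumbered. A minimal sketch of the pattern, lifted from the i8 swap cases above rather than from any new test:

  ; typed pointers
  %3 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %0, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
  %4 = atomicrmw xchg i8* %3, i8 %1 monotonic
  ret i8 %4

  ; opaque pointers
  %3 = atomicrmw xchg ptr %0, i8 %1 monotonic
  ret i8 %3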

diff  --git a/llvm/test/CodeGen/VE/Scalar/blockaddress.ll b/llvm/test/CodeGen/VE/Scalar/blockaddress.ll
index b0f449a8ad526..ddbeca3cf30d8 100644
--- a/llvm/test/CodeGen/VE/Scalar/blockaddress.ll
+++ b/llvm/test/CodeGen/VE/Scalar/blockaddress.ll
@@ -1,6 +1,6 @@
 ; RUN: llc < %s -mtriple=ve | FileCheck %s
 
-@addr = global i8* null, align 8
+@addr = global ptr null, align 8
 
 ; Function Attrs: nofree norecurse nounwind writeonly
 define void @test() {
@@ -20,6 +20,6 @@ entry:
   br label %test1
 
 test1:
-  store i8* blockaddress(@test, %test1), i8** @addr, align 8
+  store ptr blockaddress(@test, %test1), ptr @addr, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/br_analyze.ll b/llvm/test/CodeGen/VE/Scalar/br_analyze.ll
index b983fb67c2eed..78b9708ab8223 100644
--- a/llvm/test/CodeGen/VE/Scalar/br_analyze.ll
+++ b/llvm/test/CodeGen/VE/Scalar/br_analyze.ll
@@ -6,7 +6,7 @@ declare void @foo() noreturn
 ;;; Check a case where a separate branch is needed and where the original
 ;;; order should be reversed.  Copied from SystemZ/branch-08.ll
 
-define i32 @f1(i32 %a, i32 *%bptr) {
+define i32 @f1(i32 %a, ptr %bptr) {
 ; CHECK-LABEL: f1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -39,7 +39,7 @@ define i32 @f1(i32 %a, i32 *%bptr) {
 ; CHECK-NEXT:    lea.sl %s12, foo@hi(, %s0)
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 entry:
-  %b = load i32, i32 *%bptr
+  %b = load i32, ptr %bptr
   %cmp = icmp ult i32 %a, %b
   br i1 %cmp, label %callit, label %return
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/br_jt.ll b/llvm/test/CodeGen/VE/Scalar/br_jt.ll
index 3000ff27251ad..bc7b26abe7e04 100644
--- a/llvm/test/CodeGen/VE/Scalar/br_jt.ll
+++ b/llvm/test/CodeGen/VE/Scalar/br_jt.ll
@@ -123,8 +123,8 @@ define signext i32 @br_jt4(i32 signext %0) {
 
 4:                                                ; preds = %1
   %5 = sext i32 %2 to i64
-  %6 = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table.br_jt4, i64 0, i64 %5
-  %7 = load i32, i32* %6, align 4
+  %6 = getelementptr inbounds [4 x i32], ptr @switch.table.br_jt4, i64 0, i64 %5
+  %7 = load i32, ptr %6, align 4
   ret i32 %7
 
 8:                                                ; preds = %1
@@ -204,8 +204,8 @@ define signext i32 @br_jt7(i32 signext %0) {
 
 9:                                                ; preds = %4
   %10 = sext i32 %2 to i64
-  %11 = getelementptr inbounds [9 x i32], [9 x i32]* @switch.table.br_jt7, i64 0, i64 %10
-  %12 = load i32, i32* %11, align 4
+  %11 = getelementptr inbounds [9 x i32], ptr @switch.table.br_jt7, i64 0, i64 %10
+  %12 = load i32, ptr %11, align 4
   ret i32 %12
 
 13:                                               ; preds = %1, %4
@@ -285,8 +285,8 @@ define signext i32 @br_jt8(i32 signext %0) {
 
 9:                                                ; preds = %4
   %10 = sext i32 %2 to i64
-  %11 = getelementptr inbounds [9 x i32], [9 x i32]* @switch.table.br_jt8, i64 0, i64 %10
-  %12 = load i32, i32* %11, align 4
+  %11 = getelementptr inbounds [9 x i32], ptr @switch.table.br_jt8, i64 0, i64 %10
+  %12 = load i32, ptr %11, align 4
   ret i32 %12
 
 13:                                               ; preds = %1, %4

diff  --git a/llvm/test/CodeGen/VE/Scalar/brind.ll b/llvm/test/CodeGen/VE/Scalar/brind.ll
index f1142827c80e6..907f0a0750415 100644
--- a/llvm/test/CodeGen/VE/Scalar/brind.ll
+++ b/llvm/test/CodeGen/VE/Scalar/brind.ll
@@ -33,10 +33,10 @@ define signext i32 @brind(i32 signext %0) {
 ; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = icmp eq i32 %0, 1
-  %3 = select i1 %2, i8* blockaddress(@brind, %6), i8* blockaddress(@brind, %8)
+  %3 = select i1 %2, ptr blockaddress(@brind, %6), ptr blockaddress(@brind, %8)
   %4 = icmp eq i32 %0, 0
-  %5 = select i1 %4, i8* %3, i8* blockaddress(@brind, %7)
-  indirectbr i8* %5, [label %8, label %6, label %7]
+  %5 = select i1 %4, ptr %3, ptr blockaddress(@brind, %7)
+  indirectbr ptr %5, [label %8, label %6, label %7]
 
 6:                                                ; preds = %1
   br label %8

diff  --git a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj.ll b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj.ll
index d803a5fe7f266..2713d61d16c2f 100644
--- a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj.ll
+++ b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj.ll
@@ -140,22 +140,22 @@ define signext i32 @t_setjmp() {
 ; PIC-NEXT:    ld %s10, 8(, %s11)
 ; PIC-NEXT:    ld %s9, (, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
-  %1 = call i8* @llvm.frameaddress(i32 0)
-  store i8* %1, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @buf to i8**), align 8
-  %2 = call i8* @llvm.stacksave()
-  store i8* %2, i8** getelementptr inbounds (i8*, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @buf to i8**), i64 2), align 8
-  %3 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @buf to i8*))
+  %1 = call ptr @llvm.frameaddress(i32 0)
+  store ptr %1, ptr @buf, align 8
+  %2 = call ptr @llvm.stacksave()
+  store ptr %2, ptr getelementptr inbounds (ptr, ptr @buf, i64 2), align 8
+  %3 = call i32 @llvm.eh.sjlj.setjmp(ptr @buf)
   ret i32 %3
 }
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.frameaddress(i32)
+declare ptr @llvm.frameaddress(i32)
 
 ; Function Attrs: nounwind
-declare i8* @llvm.stacksave()
+declare ptr @llvm.stacksave()
 
 ; Function Attrs: nounwind
-declare i32 @llvm.eh.sjlj.setjmp(i8*)
+declare i32 @llvm.eh.sjlj.setjmp(ptr)
 
 ; Function Attrs: noinline nounwind optnone
 define void @t_longjmp() {
@@ -202,12 +202,12 @@ define void @t_longjmp() {
 ; PIC-NEXT:    or %s10, 0, %s0
 ; PIC-NEXT:    ld %s11, 16(, %s0)
 ; PIC-NEXT:    b.l.t (, %s1)
-  call void @llvm.eh.sjlj.longjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @buf to i8*))
+  call void @llvm.eh.sjlj.longjmp(ptr @buf)
   unreachable
                                                   ; No predecessors!
   ret void
 }
 
 ; Function Attrs: noreturn nounwind
-declare void @llvm.eh.sjlj.longjmp(i8*)
+declare void @llvm.eh.sjlj.longjmp(ptr)
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_bp.ll b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_bp.ll
index 0e5ca3dba6a92..6fdf71da75c4a 100644
--- a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_bp.ll
+++ b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_bp.ll
@@ -1,11 +1,11 @@
 ; RUN: llc < %s -mtriple=ve | FileCheck %s
 
 %Foo = type { [125 x i8] }
-declare void @whatever(i64, %Foo*, i8**, i8*, i8*, i32)  #0
-declare i32 @llvm.eh.sjlj.setjmp(i8*) nounwind
+declare void @whatever(i64, ptr, ptr, ptr, ptr, i32)  #0
+declare i32 @llvm.eh.sjlj.setjmp(ptr) nounwind
 
 ; Function Attrs: noinline nounwind optnone
-define i32 @t_setjmp(i64 %n, %Foo* byval(%Foo) nocapture readnone align 8 %f) {
+define i32 @t_setjmp(i64 %n, ptr byval(%Foo) nocapture readnone align 8 %f) {
 ; CHECK-LABEL: t_setjmp:
 ; CHECK:       .LBB{{[0-9]+}}_5:
 ; CHECK-NEXT:    st %s18, 48(, %s9) # 8-byte Folded Spill
@@ -75,13 +75,12 @@ define i32 @t_setjmp(i64 %n, %Foo* byval(%Foo) nocapture readnone align 8 %f) {
 ; CHECK-NEXT:    ld %s19, 56(, %s9) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld %s18, 48(, %s9) # 8-byte Folded Reload
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %buf = alloca [5 x i8*], align 16
-  %p = alloca i8*, align 8
+  %buf = alloca [5 x ptr], align 16
+  %p = alloca ptr, align 8
   %q = alloca i8, align 64
-  %r = bitcast [5 x i8*]* %buf to i8*
   %s = alloca i8, i64 %n, align 1
-  store i8* %s, i8** %p, align 8
-  %t = call i32 @llvm.eh.sjlj.setjmp(i8* %s)
-  call void @whatever(i64 %n, %Foo* %f, i8** %p, i8* %q, i8* %s, i32 %t) #1
+  store ptr %s, ptr %p, align 8
+  %t = call i32 @llvm.eh.sjlj.setjmp(ptr %s)
+  call void @whatever(i64 %n, ptr %f, ptr %p, ptr %q, ptr %s, i32 %t) #1
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_callsite.ll b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_callsite.ll
index 19ff37920bc08..67237625ad207 100644
--- a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_callsite.ll
+++ b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_callsite.ll
@@ -4,7 +4,7 @@
 ; RUN:     FileCheck %s -check-prefix=PIC
 
 ; Function Attrs: noinline nounwind optnone
-define void @test_callsite() personality i32 (...)* @__gxx_personality_sj0 {
+define void @test_callsite() personality ptr @__gxx_personality_sj0 {
 ; CHECK-LABEL: test_callsite:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -251,24 +251,23 @@ define void @test_callsite() personality i32 (...)* @__gxx_personality_sj0 {
 ; PIC-NEXT:    ld %s0, -88(, %s9)
 ; PIC-NEXT:    ld %s0, -80(, %s9)
 ; PIC-NEXT:    br.l.t .LBB0_2
-  %fn_context = alloca { i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }, align 4
+  %fn_context = alloca { ptr, i32, [4 x i32], ptr, ptr, [5 x ptr] }, align 4
   call void @llvm.eh.sjlj.callsite(i32 0)
   invoke void @f()
           to label %try.cont unwind label %lpad
 
 lpad:                                             ; preds = %entry
-  %1 = landingpad { i8*, i32 }
+  %1 = landingpad { ptr, i32 }
           cleanup
-;;  %__data = getelementptr { i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }, { i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }* %fn_context, i32 0, i32 2
-;;  %exception_gep = getelementptr [4 x i32], [4 x i32]* %__data, i32 0, i32 0
-;;  %exn_val = load volatile i32, i32* %exception_gep, align 4
-;;  %2 = inttoptr i32 %exn_val to i8*
-;;  %exn_selector_gep = getelementptr [4 x i32], [4 x i32]* %__data, i32 0, i32 1
-;;  %exn_selector_val = load volatile i32, i32* %exn_selector_gep, align 4
+;;  %__data = getelementptr { ptr, i32, [4 x i32], ptr, ptr, [5 x ptr] }, ptr %fn_context, i32 0, i32 2
+;;  %exn_val = load volatile i32, ptr %__data, align 4
+;;  %2 = inttoptr i32 %exn_val to ptr
+;;  %exn_selector_gep = getelementptr [4 x i32], ptr %__data, i32 0, i32 1
+;;  %exn_selector_val = load volatile i32, ptr %exn_selector_gep, align 4
   br label %try.cont
 
 try.cont:                                         ; preds = %lpad, %entry
-  call void @_Unwind_SjLj_Unregister({ i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }* %fn_context)
+  call void @_Unwind_SjLj_Unregister(ptr %fn_context)
   ret void
 }
 
@@ -276,7 +275,7 @@ declare void @f()
 
 declare i32 @__gxx_personality_sj0(...)
 
-declare void @_Unwind_SjLj_Unregister({ i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }*)
+declare void @_Unwind_SjLj_Unregister(ptr)
 
 ; Function Attrs: nounwind readnone
 declare void @llvm.eh.sjlj.callsite(i32)

diff  --git a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_landingpad.ll b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_landingpad.ll
index a998787b3eb77..7de0dfd68d617 100644
--- a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_landingpad.ll
+++ b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_landingpad.ll
@@ -5,7 +5,7 @@
 
 @SomeGlobal = external dso_local global i8
 
-define dso_local i32 @foo(i32 %arg) local_unnamed_addr personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*) {
+define dso_local i32 @foo(i32 %arg) local_unnamed_addr personality ptr @__gxx_personality_sj0 {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -261,7 +261,7 @@ entry:
   invoke void @errorbar() to label %exit unwind label %handle
 
 handle:
-  %error = landingpad { i8*, i32 } catch i8* @SomeGlobal
+  %error = landingpad { ptr, i32 } catch ptr @SomeGlobal
   ret i32 1
 
 exit:

diff  --git a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_lsda.ll b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_lsda.ll
index d3da4ce971aaf..52ff9ebaf1ae5 100644
--- a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_lsda.ll
+++ b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_lsda.ll
@@ -1,16 +1,16 @@
 ; RUN: llc < %s -mtriple=ve -exception-model sjlj | FileCheck %s
 
 ; Function Attrs: noinline nounwind optnone
-define i8* @test_lsda() {
+define ptr @test_lsda() {
 ; CHECK-LABEL: test_lsda:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, GCC_except_table0@lo
 ; CHECK-NEXT:    and %s0, %s0, (32)0
 ; CHECK-NEXT:    lea.sl %s0, GCC_except_table0@hi(, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %ret = call i8* @llvm.eh.sjlj.lsda()
-  ret i8* %ret
+  %ret = call ptr @llvm.eh.sjlj.lsda()
+  ret ptr %ret
 }
 
 ; Function Attrs: nounwind
-declare i8* @llvm.eh.sjlj.lsda()
+declare ptr @llvm.eh.sjlj.lsda()

diff  --git a/llvm/test/CodeGen/VE/Scalar/callstruct.ll b/llvm/test/CodeGen/VE/Scalar/callstruct.ll
index 4b5b788adfd4f..b48718fe68d59 100644
--- a/llvm/test/CodeGen/VE/Scalar/callstruct.ll
+++ b/llvm/test/CodeGen/VE/Scalar/callstruct.ll
@@ -5,16 +5,15 @@
 @A = common global %struct.a zeroinitializer, align 4
 
 ; Function Attrs: norecurse nounwind
-define void @fun(%struct.a* noalias nocapture sret(%struct.a) %a, i32 %p1, i32 %p2) {
+define void @fun(ptr noalias nocapture sret(%struct.a) %a, i32 %p1, i32 %p2) {
 ; CHECK-LABEL: fun:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stl %s1, (, %s0)
 ; CHECK-NEXT:    stl %s2, 4(, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %a.zero = getelementptr inbounds %struct.a, %struct.a* %a, i64 0, i32 0
-  store i32 %p1, i32* %a.zero, align 4
-  %a.one = getelementptr inbounds %struct.a, %struct.a* %a, i64 0, i32 1
-  store i32 %p2, i32* %a.one, align 4
+  store i32 %p1, ptr %a, align 4
+  %a.one = getelementptr inbounds %struct.a, ptr %a, i64 0, i32 1
+  store i32 %p2, ptr %a.one, align 4
   ret void
 }
 
@@ -36,11 +35,10 @@ define void @caller() {
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    or %s11, 0, %s9
   %a = alloca i64, align 8
-  %a.bc = bitcast i64* %a to %struct.a*
-  call void @callee(%struct.a* nonnull sret(%struct.a) %a.bc, i32 3, i32 4)
-  %a.val = load i64, i64* %a, align 8
-  store i64 %a.val, i64* bitcast (%struct.a* @A to i64*), align 4
+  call void @callee(ptr nonnull sret(%struct.a) %a, i32 3, i32 4)
+  %a.val = load i64, ptr %a, align 8
+  store i64 %a.val, ptr @A, align 4
   ret void
 }
 
-declare void @callee(%struct.a* sret(%struct.a), i32, i32)
+declare void @callee(ptr sret(%struct.a), i32, i32)

diff  --git a/llvm/test/CodeGen/VE/Scalar/fp_extload_truncstore.ll b/llvm/test/CodeGen/VE/Scalar/fp_extload_truncstore.ll
index 33db92ec2512c..1e708f2b0649a 100644
--- a/llvm/test/CodeGen/VE/Scalar/fp_extload_truncstore.ll
+++ b/llvm/test/CodeGen/VE/Scalar/fp_extload_truncstore.ll
@@ -6,7 +6,7 @@ declare i16 @llvm.convert.to.fp16.f64(double %a)
 declare float @llvm.convert.from.fp16.f32(i16 %a)
 declare double @llvm.convert.from.fp16.f64(i16 %a)
 
-define float @func_i16fp32(i16* %a) {
+define float @func_i16fp32(ptr %a) {
 ; CHECK-LABEL: func_i16fp32:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
@@ -15,12 +15,12 @@ define float @func_i16fp32(i16* %a) {
 ; CHECK-NEXT:    lea.sl %s12, __gnu_h2f_ieee@hi(, %s1)
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %a.val = load i16, i16* %a, align 4
+  %a.val = load i16, ptr %a, align 4
   %a.asd = call float @llvm.convert.from.fp16.f32(i16 %a.val)
   ret float %a.asd
 }
 
-define double @func_i16fp64(i16* %a) {
+define double @func_i16fp64(ptr %a) {
 ; CHECK-LABEL: func_i16fp64:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
@@ -30,12 +30,12 @@ define double @func_i16fp64(i16* %a) {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    cvt.d.s %s0, %s0
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %a.val = load i16, i16* %a, align 4
+  %a.val = load i16, ptr %a, align 4
   %a.asd = call double @llvm.convert.from.fp16.f64(i16 %a.val)
   ret double %a.asd
 }
 
-define float @func_fp16fp32(half* %a) {
+define float @func_fp16fp32(ptr %a) {
 ; CHECK-LABEL: func_fp16fp32:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
@@ -44,12 +44,12 @@ define float @func_fp16fp32(half* %a) {
 ; CHECK-NEXT:    lea.sl %s12, __gnu_h2f_ieee@hi(, %s1)
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %a.val = load half, half* %a, align 4
+  %a.val = load half, ptr %a, align 4
   %a.asd = fpext half %a.val to float
   ret float %a.asd
 }
 
-define double @func_fp16fp64(half* %a) {
+define double @func_fp16fp64(ptr %a) {
 ; CHECK-LABEL: func_fp16fp64:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
@@ -59,12 +59,12 @@ define double @func_fp16fp64(half* %a) {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    cvt.d.s %s0, %s0
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %a.val = load half, half* %a, align 4
+  %a.val = load half, ptr %a, align 4
   %a.asd = fpext half %a.val to double
   ret double %a.asd
 }
 
-define void @func_fp32i16(i16* %fl.ptr, float %val) {
+define void @func_fp32i16(ptr %fl.ptr, float %val) {
 ; CHECK-LABEL: func_fp32i16:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    st %s18, 288(, %s11) # 8-byte Folded Spill
@@ -78,11 +78,11 @@ define void @func_fp32i16(i16* %fl.ptr, float %val) {
 ; CHECK-NEXT:    ld %s18, 288(, %s11) # 8-byte Folded Reload
 ; CHECK-NEXT:    or %s11, 0, %s9
   %val.asf = call i16 @llvm.convert.to.fp16.f32(float %val)
-  store i16 %val.asf, i16* %fl.ptr
+  store i16 %val.asf, ptr %fl.ptr
   ret void
 }
 
-define half @func_fp32fp16(half* %fl.ptr, float %a) {
+define half @func_fp32fp16(ptr %fl.ptr, float %a) {
 ; CHECK-LABEL: func_fp32fp16:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    st %s18, 288(, %s11) # 8-byte Folded Spill
@@ -104,22 +104,22 @@ define half @func_fp32fp16(half* %fl.ptr, float %a) {
 ; CHECK-NEXT:    ld %s18, 288(, %s11) # 8-byte Folded Reload
 ; CHECK-NEXT:    or %s11, 0, %s9
   %a.asd = fptrunc float %a to half
-  store half %a.asd, half* %fl.ptr
+  store half %a.asd, ptr %fl.ptr
   ret half %a.asd
 }
 
-define double @func_fp32fp64(float* %a) {
+define double @func_fp32fp64(ptr %a) {
 ; CHECK-LABEL: func_fp32fp64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ldu %s0, (, %s0)
 ; CHECK-NEXT:    cvt.d.s %s0, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %a.val = load float, float* %a, align 4
+  %a.val = load float, ptr %a, align 4
   %a.asd = fpext float %a.val to double
   ret double %a.asd
 }
 
-define void @func_fp64i16(i16* %fl.ptr, double %val) {
+define void @func_fp64i16(ptr %fl.ptr, double %val) {
 ; CHECK-LABEL: func_fp64i16:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    st %s18, 288(, %s11) # 8-byte Folded Spill
@@ -133,11 +133,11 @@ define void @func_fp64i16(i16* %fl.ptr, double %val) {
 ; CHECK-NEXT:    ld %s18, 288(, %s11) # 8-byte Folded Reload
 ; CHECK-NEXT:    or %s11, 0, %s9
   %val.asf = call i16 @llvm.convert.to.fp16.f64(double %val)
-  store i16 %val.asf, i16* %fl.ptr
+  store i16 %val.asf, ptr %fl.ptr
   ret void
 }
 
-define void @func_fp64fp16(half* %fl.ptr, double %val) {
+define void @func_fp64fp16(ptr %fl.ptr, double %val) {
 ; CHECK-LABEL: func_fp64fp16:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    st %s18, 288(, %s11) # 8-byte Folded Spill
@@ -151,17 +151,17 @@ define void @func_fp64fp16(half* %fl.ptr, double %val) {
 ; CHECK-NEXT:    ld %s18, 288(, %s11) # 8-byte Folded Reload
 ; CHECK-NEXT:    or %s11, 0, %s9
   %val.asf = fptrunc double %val to half
-  store half %val.asf, half* %fl.ptr
+  store half %val.asf, ptr %fl.ptr
   ret void
 }
 
-define void @func_fp64fp32(float* %fl.ptr, double %val) {
+define void @func_fp64fp32(ptr %fl.ptr, double %val) {
 ; CHECK-LABEL: func_fp64fp32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    cvt.s.d %s1, %s1
 ; CHECK-NEXT:    stu %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %val.asf = fptrunc double %val to float
-  store float %val.asf, float* %fl.ptr
+  store float %val.asf, ptr %fl.ptr
   ret void
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/fp_fneg.ll b/llvm/test/CodeGen/VE/Scalar/fp_fneg.ll
index 2f4e6d914fa23..50a934a7bd8b1 100644
--- a/llvm/test/CodeGen/VE/Scalar/fp_fneg.ll
+++ b/llvm/test/CodeGen/VE/Scalar/fp_fneg.ll
@@ -3,7 +3,7 @@
 ;;; Test ‘fneg’ Instruction
 ;;;
 ;;; Syntax:
-;;;   <result> = fneg [fast-math flags]* <ty> <op1>   ; yields ty:result
+;;;   <result> = fneg ptr <ty> <op1>   ; yields ty:result
 ;;;
 ;;; Overview:
 ;;;    The ‘fneg’ instruction returns the negation of its operand.

diff  --git a/llvm/test/CodeGen/VE/Scalar/fp_frem.ll b/llvm/test/CodeGen/VE/Scalar/fp_frem.ll
index 2a3c119810049..2b7ce9c395d06 100644
--- a/llvm/test/CodeGen/VE/Scalar/fp_frem.ll
+++ b/llvm/test/CodeGen/VE/Scalar/fp_frem.ll
@@ -3,7 +3,7 @@
 ;;; Test ‘frem’ Instruction
 ;;;
 ;;; Syntax:
-;;;   <result> = frem [fast-math flags]* <ty> <op1>, <op2> ; yields ty:result
+;;;   <result> = frem ptr <ty> <op1>, <op2> ; yields ty:result
 ;;;
 ;;; Overview:
 ;;;   The ‘frem’ instruction returns the remainder from the division of its two

diff  --git a/llvm/test/CodeGen/VE/Scalar/frameaddr.ll b/llvm/test/CodeGen/VE/Scalar/frameaddr.ll
index 117ae90b6ccc7..5a6bcca72942e 100644
--- a/llvm/test/CodeGen/VE/Scalar/frameaddr.ll
+++ b/llvm/test/CodeGen/VE/Scalar/frameaddr.ll
@@ -1,24 +1,24 @@
 ; RUN: llc < %s -mtriple=ve | FileCheck %s
 
-define i8* @test1() nounwind {
+define ptr @test1() nounwind {
 ; CHECK-LABEL: test1:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s0, 0, %s9
 ; CHECK-NEXT:    or %s11, 0, %s9
 entry:
-  %ret = tail call i8* @llvm.frameaddress(i32 0)
-  ret i8* %ret
+  %ret = tail call ptr @llvm.frameaddress(i32 0)
+  ret ptr %ret
 }
 
-define i8* @test2() nounwind {
+define ptr @test2() nounwind {
 ; CHECK-LABEL: test2:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    ld %s0, (, %s9)
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    or %s11, 0, %s9
 entry:
-  %ret = tail call i8* @llvm.frameaddress(i32 2)
-  ret i8* %ret
+  %ret = tail call ptr @llvm.frameaddress(i32 2)
+  ret ptr %ret
 }
 
-declare i8* @llvm.frameaddress(i32) nounwind readnone
+declare ptr @llvm.frameaddress(i32) nounwind readnone

diff  --git a/llvm/test/CodeGen/VE/Scalar/function_prologue_epilogue.ll b/llvm/test/CodeGen/VE/Scalar/function_prologue_epilogue.ll
index 2332f72ef639d..d85227b2047f8 100644
--- a/llvm/test/CodeGen/VE/Scalar/function_prologue_epilogue.ll
+++ b/llvm/test/CodeGen/VE/Scalar/function_prologue_epilogue.ll
@@ -43,7 +43,7 @@ define i8 @func_gv() {
 ; PIC-NEXT:    ld %s15, 24(, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
 
-  %v = load i8, i8* @vi8, align 1
+  %v = load i8, ptr @vi8, align 1
   ret i8 %v
 }
 
@@ -84,7 +84,7 @@ define i32 @func_alloca(i32 signext %0) {
 ; PIC-NEXT:    adds.l %s11, 16, %s11
 ; PIC-NEXT:    b.l.t (, %s10)
   %2 = alloca i32, align 4
-  store i32 %0, i32* %2, align 4
-  %3 = load i32, i32* %2, align 4
+  store i32 %0, ptr %2, align 4
+  %3 = load i32, ptr %2, align 4
   ret i32 %3
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-gv.ll b/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-gv.ll
index 1977b928a70bb..750f9a5aee026 100644
--- a/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-gv.ll
+++ b/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-gv.ll
@@ -12,6 +12,6 @@ define i64 @leam(i64 %x) nounwind {
 ; CHECK-NEXT:    lea %s0, (%s0)
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %asmtmp = tail call i64 asm "lea $0, $1", "=r,*m"(i64* elementtype(i64) @A) nounwind
+  %asmtmp = tail call i64 asm "lea $0, $1", "=r,*m"(ptr elementtype(i64) @A) nounwind
   ret i64 %asmtmp
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-lo.ll b/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-lo.ll
index 4f12822c8557d..deb72c1ebd29c 100644
--- a/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-lo.ll
+++ b/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-lo.ll
@@ -9,6 +9,6 @@ define i64 @leam(i64 %x) nounwind {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %z = alloca i64, align 8
-  %asmtmp = tail call i64 asm "lea $0, $1", "=r,*m"(i64* elementtype(i64) %z) nounwind
+  %asmtmp = tail call i64 asm "lea $0, $1", "=r,*m"(ptr elementtype(i64) %z) nounwind
   ret i64 %asmtmp
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst-reg.ll b/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst-reg.ll
index e484a65c93b19..aa9e6f9590e40 100644
--- a/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst-reg.ll
+++ b/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst-reg.ll
@@ -8,11 +8,11 @@
 ;;   t26: ch,glue = inlineasm t25, TargetExternalSymbol:i64'vld $0, $2, $1', MDNode:ch<null>, TargetConstant:i64<1>, TargetConstant:i32<589834>, Register:v512i32 %4, TargetConstant:i32<262153>, Register:i64 %5, TargetConstant:i32<262153>, Register:i64 %6, t25:1
 ;;   t28: v512i32 = bitcast t27
 
-define void @vldvst(i8* %p, i64 %i) nounwind {
+define void @vldvst(ptr %p, i64 %i) nounwind {
 ; CHECK-NOT: v512
   %lvl = tail call i64 asm sideeffect "lea $0, 256", "=r"() nounwind
   tail call void asm sideeffect "lvl $0", "r"(i64 %lvl) nounwind
-  %1 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
-  tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %1, i8* %p, i64 %i) nounwind
+  %1 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(ptr %p, i64 %i) nounwind
+  tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %1, ptr %p, i64 %i) nounwind
   ret void
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst.ll b/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst.ll
index 533447749b71c..a4040a0bdcebd 100644
--- a/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst.ll
+++ b/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst.ll
@@ -1,6 +1,6 @@
 ; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
 
-define void @vld(i8* %p, i64 %i) nounwind {
+define void @vld(ptr %p, i64 %i) nounwind {
 ; CHECK-LABEL: vld:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    #APP
@@ -15,11 +15,11 @@ define void @vld(i8* %p, i64 %i) nounwind {
 ; CHECK-NEXT:    b.l.t (, %s10)
   %lvl = tail call i64 asm sideeffect "lea $0, 256", "=r"() nounwind
   tail call void asm sideeffect "lvl $0", "r"(i64 %lvl) nounwind
-  tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
+  tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(ptr %p, i64 %i) nounwind
   ret void
 }
 
-define void @vldvst(i8* %p, i64 %i) nounwind {
+define void @vldvst(ptr %p, i64 %i) nounwind {
 ; CHECK-LABEL: vldvst:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    #APP
@@ -37,12 +37,12 @@ define void @vldvst(i8* %p, i64 %i) nounwind {
 ; CHECK-NEXT:    b.l.t (, %s10)
   %lvl = tail call i64 asm sideeffect "lea $0, 256", "=r"() nounwind
   tail call void asm sideeffect "lvl $0", "r"(i64 %lvl) nounwind
-  %1 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
-  tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %1, i8* %p, i64 %i) nounwind
+  %1 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(ptr %p, i64 %i) nounwind
+  tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %1, ptr %p, i64 %i) nounwind
   ret void
 }
 
-define void @vld2vst2(i8* %p, i64 %i) nounwind {
+define void @vld2vst2(ptr %p, i64 %i) nounwind {
 ; CHECK-LABEL: vld2vst2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    #APP
@@ -66,9 +66,9 @@ define void @vld2vst2(i8* %p, i64 %i) nounwind {
 ; CHECK-NEXT:    b.l.t (, %s10)
   %lvl = tail call i64 asm sideeffect "lea $0, 256", "=r"() nounwind
   tail call void asm sideeffect "lvl $0", "r"(i64 %lvl) nounwind
-  %1 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
-  %2 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
-  tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %1, i8* %p, i64 %i) nounwind
-  tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %2, i8* %p, i64 %i) nounwind
+  %1 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(ptr %p, i64 %i) nounwind
+  %2 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(ptr %p, i64 %i) nounwind
+  tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %1, ptr %p, i64 %i) nounwind
+  tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %2, ptr %p, i64 %i) nounwind
   ret void
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/lea-opt.ll b/llvm/test/CodeGen/VE/Scalar/lea-opt.ll
index 356b27653f4fe..c1987b7805718 100644
--- a/llvm/test/CodeGen/VE/Scalar/lea-opt.ll
+++ b/llvm/test/CodeGen/VE/Scalar/lea-opt.ll
@@ -10,7 +10,7 @@
 @buf = internal global %struct.buffer zeroinitializer, align 8
 
 ; Function Attrs: norecurse nounwind readnone
-define nonnull i8* @lea_basic() {
+define nonnull ptr @lea_basic() {
 ; CHECK-LABEL: lea_basic:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, data@lo
@@ -32,11 +32,11 @@ define nonnull i8* @lea_basic() {
 ; PIC-NEXT:    ld %s16, 32(, %s11)
 ; PIC-NEXT:    ld %s15, 24(, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
-  ret i8* @data
+  ret ptr @data
 }
 
 ; Function Attrs: norecurse nounwind readnone
-define i8* @lea_offset() {
+define ptr @lea_offset() {
 ; CHECK-LABEL: lea_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, buf@lo
@@ -59,5 +59,5 @@ define i8* @lea_offset() {
 ; PIC-NEXT:    ld %s16, 32(, %s11)
 ; PIC-NEXT:    ld %s15, 24(, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
-  ret i8* getelementptr inbounds (%struct.buffer, %struct.buffer* @buf, i64 0, i32 1, i64 0)
+  ret ptr getelementptr inbounds (%struct.buffer, ptr @buf, i64 0, i32 1, i64 0)
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/load-align1.ll b/llvm/test/CodeGen/VE/Scalar/load-align1.ll
index 8c7a8d2cf4975..a2822a7286c88 100644
--- a/llvm/test/CodeGen/VE/Scalar/load-align1.ll
+++ b/llvm/test/CodeGen/VE/Scalar/load-align1.ll
@@ -15,7 +15,7 @@ define double @loadf64stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca double, align 1
-  %1 = load double, double* %addr, align 1
+  %1 = load double, ptr %addr, align 1
   ret double %1
 }
 
@@ -27,7 +27,7 @@ define float @loadf32stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca float, align 1
-  %1 = load float, float* %addr, align 1
+  %1 = load float, ptr %addr, align 1
   ret float %1
 }
 
@@ -39,7 +39,7 @@ define i64 @loadi64stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i64, align 1
-  %1 = load i64, i64* %addr, align 1
+  %1 = load i64, ptr %addr, align 1
   ret i64 %1
 }
 
@@ -51,7 +51,7 @@ define i32 @loadi32stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i32, align 1
-  %1 = load i32, i32* %addr, align 1
+  %1 = load i32, ptr %addr, align 1
   ret i32 %1
 }
 
@@ -63,7 +63,7 @@ define i16 @loadi16stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i16, align 1
-  %1 = load i16, i16* %addr, align 1
+  %1 = load i16, ptr %addr, align 1
   ret i16 %1
 }
 
@@ -75,7 +75,7 @@ define i8 @loadi8stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i8, align 1
-  %1 = load i8, i8* %addr, align 1
+  %1 = load i8, ptr %addr, align 1
   ret i8 %1
 }
 
@@ -88,7 +88,7 @@ define double @loadf64com() {
 ; CHECK-NEXT:    lea.sl %s0, vf64@hi(, %s0)
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load double, double* @vf64, align 1
+  %1 = load double, ptr @vf64, align 1
   ret double %1
 }
 
@@ -101,7 +101,7 @@ define float @loadf32com() {
 ; CHECK-NEXT:    lea.sl %s0, vf32@hi(, %s0)
 ; CHECK-NEXT:    ldu %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load float, float* @vf32, align 1
+  %1 = load float, ptr @vf32, align 1
   ret float %1
 }
 
@@ -114,7 +114,7 @@ define i64 @loadi64com() {
 ; CHECK-NEXT:    lea.sl %s0, vi64@hi(, %s0)
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i64, i64* @vi64, align 1
+  %1 = load i64, ptr @vi64, align 1
   ret i64 %1
 }
 
@@ -127,7 +127,7 @@ define i32 @loadi32com() {
 ; CHECK-NEXT:    lea.sl %s0, vi32@hi(, %s0)
 ; CHECK-NEXT:    ldl.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i32, i32* @vi32, align 1
+  %1 = load i32, ptr @vi32, align 1
   ret i32 %1
 }
 
@@ -140,7 +140,7 @@ define i16 @loadi16com() {
 ; CHECK-NEXT:    lea.sl %s0, vi16@hi(, %s0)
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i16, i16* @vi16, align 1
+  %1 = load i16, ptr @vi16, align 1
   ret i16 %1
 }
 
@@ -153,7 +153,7 @@ define i8 @loadi8com() {
 ; CHECK-NEXT:    lea.sl %s0, vi8@hi(, %s0)
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i8, i8* @vi8, align 1
+  %1 = load i8, ptr @vi8, align 1
   ret i8 %1
 }
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/load-align2.ll b/llvm/test/CodeGen/VE/Scalar/load-align2.ll
index 0736646116513..8690029e02703 100644
--- a/llvm/test/CodeGen/VE/Scalar/load-align2.ll
+++ b/llvm/test/CodeGen/VE/Scalar/load-align2.ll
@@ -15,7 +15,7 @@ define double @loadf64stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca double, align 2
-  %1 = load double, double* %addr, align 2
+  %1 = load double, ptr %addr, align 2
   ret double %1
 }
 
@@ -27,7 +27,7 @@ define float @loadf32stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca float, align 2
-  %1 = load float, float* %addr, align 2
+  %1 = load float, ptr %addr, align 2
   ret float %1
 }
 
@@ -39,7 +39,7 @@ define i64 @loadi64stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i64, align 2
-  %1 = load i64, i64* %addr, align 2
+  %1 = load i64, ptr %addr, align 2
   ret i64 %1
 }
 
@@ -51,7 +51,7 @@ define i32 @loadi32stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i32, align 2
-  %1 = load i32, i32* %addr, align 2
+  %1 = load i32, ptr %addr, align 2
   ret i32 %1
 }
 
@@ -63,7 +63,7 @@ define i16 @loadi16stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i16, align 2
-  %1 = load i16, i16* %addr, align 2
+  %1 = load i16, ptr %addr, align 2
   ret i16 %1
 }
 
@@ -75,7 +75,7 @@ define i8 @loadi8stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i8, align 2
-  %1 = load i8, i8* %addr, align 2
+  %1 = load i8, ptr %addr, align 2
   ret i8 %1
 }
 
@@ -88,7 +88,7 @@ define double @loadf64com() {
 ; CHECK-NEXT:    lea.sl %s0, vf64@hi(, %s0)
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load double, double* @vf64, align 2
+  %1 = load double, ptr @vf64, align 2
   ret double %1
 }
 
@@ -101,7 +101,7 @@ define float @loadf32com() {
 ; CHECK-NEXT:    lea.sl %s0, vf32@hi(, %s0)
 ; CHECK-NEXT:    ldu %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load float, float* @vf32, align 2
+  %1 = load float, ptr @vf32, align 2
   ret float %1
 }
 
@@ -114,7 +114,7 @@ define i64 @loadi64com() {
 ; CHECK-NEXT:    lea.sl %s0, vi64@hi(, %s0)
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i64, i64* @vi64, align 2
+  %1 = load i64, ptr @vi64, align 2
   ret i64 %1
 }
 
@@ -127,7 +127,7 @@ define i32 @loadi32com() {
 ; CHECK-NEXT:    lea.sl %s0, vi32@hi(, %s0)
 ; CHECK-NEXT:    ldl.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i32, i32* @vi32, align 2
+  %1 = load i32, ptr @vi32, align 2
   ret i32 %1
 }
 
@@ -140,7 +140,7 @@ define i16 @loadi16com() {
 ; CHECK-NEXT:    lea.sl %s0, vi16@hi(, %s0)
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i16, i16* @vi16, align 2
+  %1 = load i16, ptr @vi16, align 2
   ret i16 %1
 }
 
@@ -153,7 +153,7 @@ define i8 @loadi8com() {
 ; CHECK-NEXT:    lea.sl %s0, vi8@hi(, %s0)
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i8, i8* @vi8, align 2
+  %1 = load i8, ptr @vi8, align 2
   ret i8 %1
 }
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/load-align4.ll b/llvm/test/CodeGen/VE/Scalar/load-align4.ll
index 7ec245ddff576..ac37da4a6cd8a 100644
--- a/llvm/test/CodeGen/VE/Scalar/load-align4.ll
+++ b/llvm/test/CodeGen/VE/Scalar/load-align4.ll
@@ -15,7 +15,7 @@ define double @loadf64stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca double, align 4
-  %1 = load double, double* %addr, align 4
+  %1 = load double, ptr %addr, align 4
   ret double %1
 }
 
@@ -27,7 +27,7 @@ define float @loadf32stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca float, align 4
-  %1 = load float, float* %addr, align 4
+  %1 = load float, ptr %addr, align 4
   ret float %1
 }
 
@@ -39,7 +39,7 @@ define i64 @loadi64stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i64, align 4
-  %1 = load i64, i64* %addr, align 4
+  %1 = load i64, ptr %addr, align 4
   ret i64 %1
 }
 
@@ -51,7 +51,7 @@ define i32 @loadi32stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i32, align 4
-  %1 = load i32, i32* %addr, align 4
+  %1 = load i32, ptr %addr, align 4
   ret i32 %1
 }
 
@@ -63,7 +63,7 @@ define i16 @loadi16stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i16, align 4
-  %1 = load i16, i16* %addr, align 4
+  %1 = load i16, ptr %addr, align 4
   ret i16 %1
 }
 
@@ -75,7 +75,7 @@ define i8 @loadi8stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i8, align 4
-  %1 = load i8, i8* %addr, align 4
+  %1 = load i8, ptr %addr, align 4
   ret i8 %1
 }
 
@@ -88,7 +88,7 @@ define double @loadf64com() {
 ; CHECK-NEXT:    lea.sl %s0, vf64@hi(, %s0)
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load double, double* @vf64, align 4
+  %1 = load double, ptr @vf64, align 4
   ret double %1
 }
 
@@ -101,7 +101,7 @@ define float @loadf32com() {
 ; CHECK-NEXT:    lea.sl %s0, vf32@hi(, %s0)
 ; CHECK-NEXT:    ldu %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load float, float* @vf32, align 4
+  %1 = load float, ptr @vf32, align 4
   ret float %1
 }
 
@@ -114,7 +114,7 @@ define i64 @loadi64com() {
 ; CHECK-NEXT:    lea.sl %s0, vi64@hi(, %s0)
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i64, i64* @vi64, align 4
+  %1 = load i64, ptr @vi64, align 4
   ret i64 %1
 }
 
@@ -127,7 +127,7 @@ define i32 @loadi32com() {
 ; CHECK-NEXT:    lea.sl %s0, vi32@hi(, %s0)
 ; CHECK-NEXT:    ldl.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i32, i32* @vi32, align 4
+  %1 = load i32, ptr @vi32, align 4
   ret i32 %1
 }
 
@@ -140,7 +140,7 @@ define i16 @loadi16com() {
 ; CHECK-NEXT:    lea.sl %s0, vi16@hi(, %s0)
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i16, i16* @vi16, align 4
+  %1 = load i16, ptr @vi16, align 4
   ret i16 %1
 }
 
@@ -153,7 +153,7 @@ define i8 @loadi8com() {
 ; CHECK-NEXT:    lea.sl %s0, vi8@hi(, %s0)
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i8, i8* @vi8, align 4
+  %1 = load i8, ptr @vi8, align 4
   ret i8 %1
 }
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/load-align8.ll b/llvm/test/CodeGen/VE/Scalar/load-align8.ll
index b1457a560ba54..3805e8887570b 100644
--- a/llvm/test/CodeGen/VE/Scalar/load-align8.ll
+++ b/llvm/test/CodeGen/VE/Scalar/load-align8.ll
@@ -15,7 +15,7 @@ define double @loadf64stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca double, align 8
-  %1 = load double, double* %addr, align 8
+  %1 = load double, ptr %addr, align 8
   ret double %1
 }
 
@@ -27,7 +27,7 @@ define float @loadf32stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca float, align 8
-  %1 = load float, float* %addr, align 8
+  %1 = load float, ptr %addr, align 8
   ret float %1
 }
 
@@ -39,7 +39,7 @@ define i64 @loadi64stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i64, align 8
-  %1 = load i64, i64* %addr, align 8
+  %1 = load i64, ptr %addr, align 8
   ret i64 %1
 }
 
@@ -51,7 +51,7 @@ define i32 @loadi32stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i32, align 8
-  %1 = load i32, i32* %addr, align 8
+  %1 = load i32, ptr %addr, align 8
   ret i32 %1
 }
 
@@ -63,7 +63,7 @@ define i16 @loadi16stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i16, align 8
-  %1 = load i16, i16* %addr, align 8
+  %1 = load i16, ptr %addr, align 8
   ret i16 %1
 }
 
@@ -75,7 +75,7 @@ define i8 @loadi8stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i8, align 8
-  %1 = load i8, i8* %addr, align 8
+  %1 = load i8, ptr %addr, align 8
   ret i8 %1
 }
 
@@ -88,7 +88,7 @@ define double @loadf64com() {
 ; CHECK-NEXT:    lea.sl %s0, vf64@hi(, %s0)
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load double, double* @vf64, align 8
+  %1 = load double, ptr @vf64, align 8
   ret double %1
 }
 
@@ -101,7 +101,7 @@ define float @loadf32com() {
 ; CHECK-NEXT:    lea.sl %s0, vf32@hi(, %s0)
 ; CHECK-NEXT:    ldu %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load float, float* @vf32, align 8
+  %1 = load float, ptr @vf32, align 8
   ret float %1
 }
 
@@ -114,7 +114,7 @@ define i64 @loadi64com() {
 ; CHECK-NEXT:    lea.sl %s0, vi64@hi(, %s0)
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i64, i64* @vi64, align 8
+  %1 = load i64, ptr @vi64, align 8
   ret i64 %1
 }
 
@@ -127,7 +127,7 @@ define i32 @loadi32com() {
 ; CHECK-NEXT:    lea.sl %s0, vi32@hi(, %s0)
 ; CHECK-NEXT:    ldl.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i32, i32* @vi32, align 8
+  %1 = load i32, ptr @vi32, align 8
   ret i32 %1
 }
 
@@ -140,7 +140,7 @@ define i16 @loadi16com() {
 ; CHECK-NEXT:    lea.sl %s0, vi16@hi(, %s0)
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i16, i16* @vi16, align 8
+  %1 = load i16, ptr @vi16, align 8
   ret i16 %1
 }
 
@@ -153,7 +153,7 @@ define i8 @loadi8com() {
 ; CHECK-NEXT:    lea.sl %s0, vi8@hi(, %s0)
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i8, i8* @vi8, align 8
+  %1 = load i8, ptr @vi8, align 8
   ret i8 %1
 }
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/load.ll b/llvm/test/CodeGen/VE/Scalar/load.ll
index fb4ab95fa0be0..de0286ad3c031 100644
--- a/llvm/test/CodeGen/VE/Scalar/load.ll
+++ b/llvm/test/CodeGen/VE/Scalar/load.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
 
 ; Function Attrs: norecurse nounwind readonly
-define fp128 @loadf128(fp128* nocapture readonly %0) {
+define fp128 @loadf128(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadf128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld %s2, 8(, %s0)
@@ -9,144 +9,144 @@ define fp128 @loadf128(fp128* nocapture readonly %0) {
 ; CHECK-NEXT:    or %s0, 0, %s2
 ; CHECK-NEXT:    or %s1, 0, %s3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load fp128, fp128* %0, align 16
+  %2 = load fp128, ptr %0, align 16
   ret fp128 %2
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define double @loadf64(double* nocapture readonly %0) {
+define double @loadf64(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadf64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load double, double* %0, align 16
+  %2 = load double, ptr %0, align 16
   ret double %2
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define float @loadf32(float* nocapture readonly %0) {
+define float @loadf32(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadf32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ldu %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load float, float* %0, align 16
+  %2 = load float, ptr %0, align 16
   ret float %2
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define i128 @loadi128(i128* nocapture readonly %0) {
+define i128 @loadi128(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadi128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld %s2, (, %s0)
 ; CHECK-NEXT:    ld %s1, 8(, %s0)
 ; CHECK-NEXT:    or %s0, 0, %s2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load i128, i128* %0, align 16
+  %2 = load i128, ptr %0, align 16
   ret i128 %2
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define i64 @loadi64(i64* nocapture readonly %0) {
+define i64 @loadi64(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadi64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load i64, i64* %0, align 16
+  %2 = load i64, ptr %0, align 16
   ret i64 %2
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define i32 @loadi32(i32* nocapture readonly %0) {
+define i32 @loadi32(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadi32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ldl.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load i32, i32* %0, align 16
+  %2 = load i32, ptr %0, align 16
   ret i32 %2
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define i64 @loadi32sext(i32* nocapture readonly %0) {
+define i64 @loadi32sext(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadi32sext:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ldl.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load i32, i32* %0, align 16
+  %2 = load i32, ptr %0, align 16
   %3 = sext i32 %2 to i64
   ret i64 %3
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define i64 @loadi32zext(i32* nocapture readonly %0) {
+define i64 @loadi32zext(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadi32zext:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ldl.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load i32, i32* %0, align 16
+  %2 = load i32, ptr %0, align 16
   %3 = zext i32 %2 to i64
   ret i64 %3
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define i16 @loadi16(i16* nocapture readonly %0) {
+define i16 @loadi16(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadi16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load i16, i16* %0, align 16
+  %2 = load i16, ptr %0, align 16
   ret i16 %2
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define i64 @loadi16sext(i16* nocapture readonly %0) {
+define i64 @loadi16sext(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadi16sext:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld2b.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load i16, i16* %0, align 16
+  %2 = load i16, ptr %0, align 16
   %3 = sext i16 %2 to i64
   ret i64 %3
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define i64 @loadi16zext(i16* nocapture readonly %0) {
+define i64 @loadi16zext(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadi16zext:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load i16, i16* %0, align 16
+  %2 = load i16, ptr %0, align 16
   %3 = zext i16 %2 to i64
   ret i64 %3
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define i8 @loadi8(i8* nocapture readonly %0) {
+define i8 @loadi8(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadi8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load i8, i8* %0, align 16
+  %2 = load i8, ptr %0, align 16
   ret i8 %2
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define i64 @loadi8sext(i8* nocapture readonly %0) {
+define i64 @loadi8sext(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadi8sext:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld1b.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load i8, i8* %0, align 16
+  %2 = load i8, ptr %0, align 16
   %3 = sext i8 %2 to i64
   ret i64 %3
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define i64 @loadi8zext(i8* nocapture readonly %0) {
+define i64 @loadi8zext(ptr nocapture readonly %0) {
 ; CHECK-LABEL: loadi8zext:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load i8, i8* %0, align 16
+  %2 = load i8, ptr %0, align 16
   %3 = zext i8 %2 to i64
   ret i64 %3
 }
@@ -160,7 +160,7 @@ define fp128 @loadf128stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca fp128, align 16
-  %1 = load fp128, fp128* %addr, align 16
+  %1 = load fp128, ptr %addr, align 16
   ret fp128 %1
 }
 
@@ -172,7 +172,7 @@ define double @loadf64stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca double, align 16
-  %1 = load double, double* %addr, align 16
+  %1 = load double, ptr %addr, align 16
   ret double %1
 }
 
@@ -184,7 +184,7 @@ define float @loadf32stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca float, align 16
-  %1 = load float, float* %addr, align 16
+  %1 = load float, ptr %addr, align 16
   ret float %1
 }
 
@@ -197,7 +197,7 @@ define i128 @loadi128stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i128, align 16
-  %1 = load i128, i128* %addr, align 16
+  %1 = load i128, ptr %addr, align 16
   ret i128 %1
 }
 
@@ -209,7 +209,7 @@ define i64 @loadi64stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i64, align 16
-  %1 = load i64, i64* %addr, align 16
+  %1 = load i64, ptr %addr, align 16
   ret i64 %1
 }
 
@@ -221,7 +221,7 @@ define i32 @loadi32stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i32, align 16
-  %1 = load i32, i32* %addr, align 16
+  %1 = load i32, ptr %addr, align 16
   ret i32 %1
 }
 
@@ -233,7 +233,7 @@ define i16 @loadi16stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i16, align 16
-  %1 = load i16, i16* %addr, align 16
+  %1 = load i16, ptr %addr, align 16
   ret i16 %1
 }
 
@@ -245,6 +245,6 @@ define i8 @loadi8stk() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i8, align 16
-  %1 = load i8, i8* %addr, align 16
+  %1 = load i8, ptr %addr, align 16
   ret i8 %1
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/load_gv.ll b/llvm/test/CodeGen/VE/Scalar/load_gv.ll
index 374bb6612ee94..b4daad4663dde 100644
--- a/llvm/test/CodeGen/VE/Scalar/load_gv.ll
+++ b/llvm/test/CodeGen/VE/Scalar/load_gv.ll
@@ -19,7 +19,7 @@ define fp128 @loadf128com() {
 ; CHECK-NEXT:    ld %s0, 8(, %s2)
 ; CHECK-NEXT:    ld %s1, (, %s2)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load fp128, fp128* @vf128, align 16
+  %1 = load fp128, ptr @vf128, align 16
   ret fp128 %1
 }
 
@@ -32,7 +32,7 @@ define double @loadf64com() {
 ; CHECK-NEXT:    lea.sl %s0, vf64@hi(, %s0)
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load double, double* @vf64, align 8
+  %1 = load double, ptr @vf64, align 8
   ret double %1
 }
 
@@ -45,7 +45,7 @@ define float @loadf32com() {
 ; CHECK-NEXT:    lea.sl %s0, vf32@hi(, %s0)
 ; CHECK-NEXT:    ldu %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load float, float* @vf32, align 4
+  %1 = load float, ptr @vf32, align 4
   ret float %1
 }
 
@@ -59,7 +59,7 @@ define i128 @loadi128com() {
 ; CHECK-NEXT:    ld %s0, (, %s1)
 ; CHECK-NEXT:    ld %s1, 8(, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i128, i128* @vi128, align 16
+  %1 = load i128, ptr @vi128, align 16
   ret i128 %1
 }
 
@@ -72,7 +72,7 @@ define i64 @loadi64com() {
 ; CHECK-NEXT:    lea.sl %s0, vi64@hi(, %s0)
 ; CHECK-NEXT:    ld %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i64, i64* @vi64, align 8
+  %1 = load i64, ptr @vi64, align 8
   ret i64 %1
 }
 
@@ -85,7 +85,7 @@ define i32 @loadi32com() {
 ; CHECK-NEXT:    lea.sl %s0, vi32@hi(, %s0)
 ; CHECK-NEXT:    ldl.sx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i32, i32* @vi32, align 4
+  %1 = load i32, ptr @vi32, align 4
   ret i32 %1
 }
 
@@ -98,7 +98,7 @@ define i16 @loadi16com() {
 ; CHECK-NEXT:    lea.sl %s0, vi16@hi(, %s0)
 ; CHECK-NEXT:    ld2b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i16, i16* @vi16, align 2
+  %1 = load i16, ptr @vi16, align 2
   ret i16 %1
 }
 
@@ -111,6 +111,6 @@ define i8 @loadi8com() {
 ; CHECK-NEXT:    lea.sl %s0, vi8@hi(, %s0)
 ; CHECK-NEXT:    ld1b.zx %s0, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load i8, i8* @vi8, align 1
+  %1 = load i8, ptr @vi8, align 1
   ret i8 %1
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/load_off.ll b/llvm/test/CodeGen/VE/Scalar/load_off.ll
index d29f15d91776a..c776dcc0e71ff 100644
--- a/llvm/test/CodeGen/VE/Scalar/load_off.ll
+++ b/llvm/test/CodeGen/VE/Scalar/load_off.ll
@@ -19,7 +19,7 @@ define signext i8 @loadi8s() {
 ; CHECK-NEXT:    ld1b.sx %s0, 2(, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @bufi8, i64 0, i64 2), align 1
+  %0 = load i8, ptr getelementptr inbounds ([3 x i8], ptr @bufi8, i64 0, i64 2), align 1
   ret i8 %0
 }
 
@@ -33,7 +33,7 @@ define signext i16 @loadi16s() {
 ; CHECK-NEXT:    ld2b.sx %s0, 4(, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = load i16, i16* getelementptr inbounds ([3 x i16], [3 x i16]* @bufi16, i64 0, i64 2), align 2
+  %0 = load i16, ptr getelementptr inbounds ([3 x i16], ptr @bufi16, i64 0, i64 2), align 2
   ret i16 %0
 }
 
@@ -47,7 +47,7 @@ define signext i32 @loadi32s() {
 ; CHECK-NEXT:    ldl.sx %s0, 8(, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @bufi32, i64 0, i64 2), align 4
+  %0 = load i32, ptr getelementptr inbounds ([3 x i32], ptr @bufi32, i64 0, i64 2), align 4
   ret i32 %0
 }
 
@@ -61,7 +61,7 @@ define i64 @loadi64s() {
 ; CHECK-NEXT:    ld %s0, 16(, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @bufi64, i64 0, i64 2), align 8
+  %0 = load i64, ptr getelementptr inbounds ([3 x i64], ptr @bufi64, i64 0, i64 2), align 8
   ret i64 %0
 }
 
@@ -76,7 +76,7 @@ define i128 @loadi128s() {
 ; CHECK-NEXT:    ld %s1, 40(, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = load i128, i128* getelementptr inbounds ([3 x i128], [3 x i128]* @bufi128, i64 0, i64 2), align 16
+  %0 = load i128, ptr getelementptr inbounds ([3 x i128], ptr @bufi128, i64 0, i64 2), align 16
   ret i128 %0
 }
 
@@ -90,7 +90,7 @@ define zeroext i8 @loadi8z() {
 ; CHECK-NEXT:    ld1b.zx %s0, 2(, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @bufi8, i64 0, i64 2), align 1
+  %0 = load i8, ptr getelementptr inbounds ([3 x i8], ptr @bufi8, i64 0, i64 2), align 1
   ret i8 %0
 }
 
@@ -104,7 +104,7 @@ define zeroext i16 @loadi16z() {
 ; CHECK-NEXT:    ld2b.zx %s0, 4(, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = load i16, i16* getelementptr inbounds ([3 x i16], [3 x i16]* @bufi16, i64 0, i64 2), align 2
+  %0 = load i16, ptr getelementptr inbounds ([3 x i16], ptr @bufi16, i64 0, i64 2), align 2
   ret i16 %0
 }
 
@@ -118,7 +118,7 @@ define zeroext i32 @loadi32z() {
 ; CHECK-NEXT:    ldl.zx %s0, 8(, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @bufi32, i64 0, i64 2), align 4
+  %0 = load i32, ptr getelementptr inbounds ([3 x i32], ptr @bufi32, i64 0, i64 2), align 4
   ret i32 %0
 }
 
@@ -132,7 +132,7 @@ define i64 @loadi64z() {
 ; CHECK-NEXT:    ld %s0, 16(, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = load i64, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @bufi64, i64 0, i64 2), align 8
+  %0 = load i64, ptr getelementptr inbounds ([3 x i64], ptr @bufi64, i64 0, i64 2), align 8
   ret i64 %0
 }
 
@@ -147,7 +147,7 @@ define i128 @loadi128z() {
 ; CHECK-NEXT:    ld %s1, 40(, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = load i128, i128* getelementptr inbounds ([3 x i128], [3 x i128]* @bufi128, i64 0, i64 2), align 16
+  %0 = load i128, ptr getelementptr inbounds ([3 x i128], ptr @bufi128, i64 0, i64 2), align 16
   ret i128 %0
 }
 
@@ -161,7 +161,7 @@ define float @loadf32() {
 ; CHECK-NEXT:    ldu %s0, 8(, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = load float, float* getelementptr inbounds ([3 x float], [3 x float]* @buff32, i64 0, i64 2), align 4
+  %0 = load float, ptr getelementptr inbounds ([3 x float], ptr @buff32, i64 0, i64 2), align 4
   ret float %0
 }
 
@@ -175,7 +175,7 @@ define double @loadf64() {
 ; CHECK-NEXT:    ld %s0, 16(, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = load double, double* getelementptr inbounds ([3 x double], [3 x double]* @buff64, i64 0, i64 2), align 8
+  %0 = load double, ptr getelementptr inbounds ([3 x double], ptr @buff64, i64 0, i64 2), align 8
   ret double %0
 }
 
@@ -190,6 +190,6 @@ define fp128 @loadf128() {
 ; CHECK-NEXT:    ld %s1, 32(, %s2)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %0 = load fp128, fp128* getelementptr inbounds ([3 x fp128], [3 x fp128]* @buff128, i64 0, i64 2), align 16
+  %0 = load fp128, ptr getelementptr inbounds ([3 x fp128], ptr @buff128, i64 0, i64 2), align 16
   ret fp128 %0
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/loadrri.ll b/llvm/test/CodeGen/VE/Scalar/loadrri.ll
index 18e4f837c6de1..8a88e608ff2c7 100644
--- a/llvm/test/CodeGen/VE/Scalar/loadrri.ll
+++ b/llvm/test/CodeGen/VE/Scalar/loadrri.ll
@@ -10,20 +10,20 @@
 ;;;   3. LD1BSXrri with %reg + %frame-index
 
 ; Function Attrs: norecurse nounwind readonly
-define signext i8 @func_rr(%struct.data* nocapture readonly %0, i32 signext %1) {
+define signext i8 @func_rr(ptr nocapture readonly %0, i32 signext %1) {
 ; CHECK-LABEL: func_rr:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    sll %s1, %s1, 2
 ; CHECK-NEXT:    ld1b.sx %s0, (%s1, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = sext i32 %1 to i64
-  %4 = getelementptr inbounds %struct.data, %struct.data* %0, i64 %3, i32 0, i64 0
-  %5 = load i8, i8* %4, align 1
+  %4 = getelementptr inbounds %struct.data, ptr %0, i64 %3, i32 0, i64 0
+  %5 = load i8, ptr %4, align 1
   ret i8 %5
 }
 
 ; Function Attrs: nounwind
-define signext i8 @func_fr(%struct.data* readonly %0, i32 signext %1) {
+define signext i8 @func_fr(ptr readonly %0, i32 signext %1) {
 ; CHECK-LABEL: func_fr:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    sll %s1, %s1, 2
@@ -33,25 +33,24 @@ define signext i8 @func_fr(%struct.data* readonly %0, i32 signext %1) {
 ; CHECK-NEXT:    adds.l %s11, 48, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = alloca [10 x %struct.data], align 1
-  %4 = getelementptr inbounds [10 x %struct.data], [10 x %struct.data]* %3, i64 0, i64 0, i32 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 40, i8* nonnull %4)
-  %5 = sext i32 %1 to i64
-  %6 = getelementptr inbounds [10 x %struct.data], [10 x %struct.data]* %3, i64 0, i64 %5, i32 0, i64 0
-  %7 = getelementptr inbounds %struct.data, %struct.data* %0, i64 %5, i32 0, i64 0
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 1 %6, i8* align 1 %7, i64 4, i1 true)
-  %8 = load volatile i8, i8* %6, align 1
-  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %4)
-  ret i8 %8
+  call void @llvm.lifetime.start.p0(i64 40, ptr nonnull %3)
+  %4 = sext i32 %1 to i64
+  %5 = getelementptr inbounds [10 x %struct.data], ptr %3, i64 0, i64 %4, i32 0, i64 0
+  %6 = getelementptr inbounds %struct.data, ptr %0, i64 %4, i32 0, i64 0
+  call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 1 %5, ptr align 1 %6, i64 4, i1 true)
+  %7 = load volatile i8, ptr %5, align 1
+  call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %3)
+  ret i8 %7
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 %"basic_string" = type { %union.anon.3, [23 x i8] }
 %union.anon.3 = type { i8 }
 
-define signext i8 @func_rf(i8* readonly %0, i64 %1, i32 signext %2) {
+define signext i8 @func_rf(ptr readonly %0, i64 %1, i32 signext %2) {
 ; CHECK-LABEL: func_rf:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    ld1b.sx %s0, 8(%s1, %s11)
@@ -60,7 +59,7 @@ define signext i8 @func_rf(i8* readonly %0, i64 %1, i32 signext %2) {
   %buf = alloca %"basic_string", align 8
 
   %sub631 = add nsw i64 %1, -1
-  %add.ptr.i = getelementptr inbounds %"basic_string", %"basic_string"* %buf, i64 0, i32 1, i64 %sub631
-  %ret = load i8, i8* %add.ptr.i, align 1
+  %add.ptr.i = getelementptr inbounds %"basic_string", ptr %buf, i64 0, i32 1, i64 %sub631
+  %ret = load i8, ptr %add.ptr.i, align 1
   ret i8 %ret
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/pic_access_static_data.ll b/llvm/test/CodeGen/VE/Scalar/pic_access_static_data.ll
index 790aaf8b20a53..cab545a691fc6 100644
--- a/llvm/test/CodeGen/VE/Scalar/pic_access_static_data.ll
+++ b/llvm/test/CodeGen/VE/Scalar/pic_access_static_data.ll
@@ -27,9 +27,9 @@ define void @func() {
 ; CHECK-NEXT:    ld %s15, 24(, %s11)
 ; CHECK-NEXT:    b.l.t (, %s10)
 
-  %1 = load i1, i1* @src, align 4
+  %1 = load i1, ptr @src, align 4
   %2 = select i1 %1, i32 100, i32 0
-  store i32 %2, i32* @dst, align 4
+  store i32 %2, ptr @dst, align 4
   ret void
 }
 
@@ -67,11 +67,11 @@ define i32 @main() {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s0, 0, (0)1
 ; CHECK-NEXT:    or %s11, 0, %s9
-  store i1 true, i1* @src, align 4
+  store i1 true, ptr @src, align 4
   tail call void @func()
-  %1 = load i32, i32* @dst, align 4
-  %2 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i64 0, i64 0), i32 %1)
+  %1 = load i32, ptr @dst, align 4
+  %2 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %1)
   ret i32 0
 }
 
-declare i32 @printf(i8* nocapture readonly, ...)
+declare i32 @printf(ptr nocapture readonly, ...)

diff  --git a/llvm/test/CodeGen/VE/Scalar/pic_func_call.ll b/llvm/test/CodeGen/VE/Scalar/pic_func_call.ll
index 489ffbe4c0ea9..0a4bf0f6f8982 100644
--- a/llvm/test/CodeGen/VE/Scalar/pic_func_call.ll
+++ b/llvm/test/CodeGen/VE/Scalar/pic_func_call.ll
@@ -14,7 +14,7 @@ define void @func() {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s11, 0, %s9
 
-  call void bitcast (void (...)* @function to void ()*)()
+  call void @function()
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/returnaddr.ll b/llvm/test/CodeGen/VE/Scalar/returnaddr.ll
index ea1b5f687c9da..e2c96079f11a9 100644
--- a/llvm/test/CodeGen/VE/Scalar/returnaddr.ll
+++ b/llvm/test/CodeGen/VE/Scalar/returnaddr.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=ve-- | FileCheck %s
 
-define i8* @h() nounwind readnone optsize {
+define ptr @h() nounwind readnone optsize {
 ; CHECK-LABEL: h:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -27,13 +27,13 @@ define i8* @h() nounwind readnone optsize {
 ; CHECK-NEXT:    ld %s9, (, %s11)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %ret = tail call i8* @llvm.returnaddress(i32 2)
-  ret i8* %ret
+  %ret = tail call ptr @llvm.returnaddress(i32 2)
+  ret ptr %ret
 }
 
-declare i8* @llvm.returnaddress(i32) nounwind readnone
+declare ptr @llvm.returnaddress(i32) nounwind readnone
 
-define i8* @g() nounwind readnone optsize {
+define ptr @g() nounwind readnone optsize {
 ; CHECK-LABEL: g:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -58,11 +58,11 @@ define i8* @g() nounwind readnone optsize {
 ; CHECK-NEXT:    ld %s9, (, %s11)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %ret = tail call i8* @llvm.returnaddress(i32 1)
-  ret i8* %ret
+  %ret = tail call ptr @llvm.returnaddress(i32 1)
+  ret ptr %ret
 }
 
-define i8* @f() nounwind readnone optsize {
+define ptr @f() nounwind readnone optsize {
 ; CHECK-LABEL: f:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -86,6 +86,6 @@ define i8* @f() nounwind readnone optsize {
 ; CHECK-NEXT:    ld %s9, (, %s11)
 ; CHECK-NEXT:    b.l.t (, %s10)
 entry:
-  %ret = tail call i8* @llvm.returnaddress(i32 0)
-  ret i8* %ret
+  %ret = tail call ptr @llvm.returnaddress(i32 0)
+  ret ptr %ret
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/sext_zext_load.ll b/llvm/test/CodeGen/VE/Scalar/sext_zext_load.ll
index f9f8aa8cebbfd..5bb96f8e4fd88 100644
--- a/llvm/test/CodeGen/VE/Scalar/sext_zext_load.ll
+++ b/llvm/test/CodeGen/VE/Scalar/sext_zext_load.ll
@@ -7,7 +7,7 @@ define signext i16 @func1() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i8, align 1
-  %a.val = load i8, i8* %a, align 1
+  %a.val = load i8, ptr %a, align 1
   %a.conv = sext i8 %a.val to i16
   ret i16 %a.conv
 }
@@ -19,7 +19,7 @@ define i32 @func2() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i8, align 1
-  %a.val = load i8, i8* %a, align 1
+  %a.val = load i8, ptr %a, align 1
   %a.conv = sext i8 %a.val to i32
   ret i32 %a.conv
 }
@@ -31,7 +31,7 @@ define i64 @func3() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i8, align 1
-  %a.val = load i8, i8* %a, align 1
+  %a.val = load i8, ptr %a, align 1
   %a.conv = sext i8 %a.val to i64
   ret i64 %a.conv
 }
@@ -44,7 +44,7 @@ define zeroext i16 @func5() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i8, align 1
-  %a.val = load i8, i8* %a, align 1
+  %a.val = load i8, ptr %a, align 1
   %a.conv = sext i8 %a.val to i16
   ret i16 %a.conv
 }
@@ -56,7 +56,7 @@ define i32 @func6() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i8, align 1
-  %a.val = load i8, i8* %a, align 1
+  %a.val = load i8, ptr %a, align 1
   %a.conv = sext i8 %a.val to i32
   ret i32 %a.conv
 }
@@ -68,7 +68,7 @@ define i64 @func7() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i8, align 1
-  %a.val = load i8, i8* %a, align 1
+  %a.val = load i8, ptr %a, align 1
   %a.conv = sext i8 %a.val to i64
   ret i64 %a.conv
 }
@@ -80,7 +80,7 @@ define signext i16 @func9() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i8, align 1
-  %a.val = load i8, i8* %a, align 1
+  %a.val = load i8, ptr %a, align 1
   %a.conv = zext i8 %a.val to i16
   ret i16 %a.conv
 }
@@ -92,7 +92,7 @@ define i32 @func10() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i8, align 1
-  %a.val = load i8, i8* %a, align 1
+  %a.val = load i8, ptr %a, align 1
   %a.conv = zext i8 %a.val to i32
   ret i32 %a.conv
 }
@@ -104,7 +104,7 @@ define i64 @func11() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i8, align 1
-  %a.val = load i8, i8* %a, align 1
+  %a.val = load i8, ptr %a, align 1
   %a.conv = zext i8 %a.val to i64
   ret i64 %a.conv
 }
@@ -116,7 +116,7 @@ define zeroext i16 @func13() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i8, align 1
-  %a.val = load i8, i8* %a, align 1
+  %a.val = load i8, ptr %a, align 1
   %a.conv = zext i8 %a.val to i16
   ret i16 %a.conv
 }
@@ -128,7 +128,7 @@ define zeroext i16 @func14() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i8, align 1
-  %a.val = load i8, i8* %a, align 1
+  %a.val = load i8, ptr %a, align 1
   %a.conv = zext i8 %a.val to i16
   ret i16 %a.conv
 }
@@ -140,7 +140,7 @@ define i64 @func15() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i8, align 1
-  %a.val = load i8, i8* %a, align 1
+  %a.val = load i8, ptr %a, align 1
   %a.conv = zext i8 %a.val to i64
   ret i64 %a.conv
 }
@@ -152,7 +152,7 @@ define i32 @func17() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i16, align 2
-  %a.val = load i16, i16* %a, align 2
+  %a.val = load i16, ptr %a, align 2
   %a.conv = sext i16 %a.val to i32
   ret i32 %a.conv
 }
@@ -164,7 +164,7 @@ define i64 @func18() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i16, align 2
-  %a.val = load i16, i16* %a, align 2
+  %a.val = load i16, ptr %a, align 2
   %a.conv = sext i16 %a.val to i64
   ret i64 %a.conv
 }
@@ -176,7 +176,7 @@ define zeroext i16 @func20() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i16, align 2
-  %a.conv = load i16, i16* %a, align 2
+  %a.conv = load i16, ptr %a, align 2
   ret i16 %a.conv
 }
 
@@ -187,7 +187,7 @@ define i64 @func21() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i16, align 2
-  %a.val = load i16, i16* %a, align 2
+  %a.val = load i16, ptr %a, align 2
   %a.conv = sext i16 %a.val to i64
   ret i64 %a.conv
 }
@@ -199,7 +199,7 @@ define i32 @func23() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i16, align 2
-  %a.val = load i16, i16* %a, align 2
+  %a.val = load i16, ptr %a, align 2
   %a.conv = zext i16 %a.val to i32
   ret i32 %a.conv
 }
@@ -211,7 +211,7 @@ define i64 @func24() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i16, align 2
-  %a.val = load i16, i16* %a, align 2
+  %a.val = load i16, ptr %a, align 2
   %a.conv = zext i16 %a.val to i64
   ret i64 %a.conv
 }
@@ -223,7 +223,7 @@ define zeroext i16 @func26() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i16, align 2
-  %a.conv = load i16, i16* %a, align 2
+  %a.conv = load i16, ptr %a, align 2
   ret i16 %a.conv
 }
 
@@ -234,7 +234,7 @@ define i64 @func27() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i16, align 2
-  %a.val = load i16, i16* %a, align 2
+  %a.val = load i16, ptr %a, align 2
   %a.conv = zext i16 %a.val to i64
   ret i64 %a.conv
 }
@@ -246,7 +246,7 @@ define i64 @func29() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i32, align 4
-  %a.val = load i32, i32* %a, align 4
+  %a.val = load i32, ptr %a, align 4
   %a.conv = sext i32 %a.val to i64
   ret i64 %a.conv
 }
@@ -258,7 +258,7 @@ define i64 @func31() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i32, align 4
-  %a.val = load i32, i32* %a, align 4
+  %a.val = load i32, ptr %a, align 4
   %a.conv = sext i32 %a.val to i64
   ret i64 %a.conv
 }
@@ -270,7 +270,7 @@ define i64 @func33() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i32, align 4
-  %a.val = load i32, i32* %a, align 4
+  %a.val = load i32, ptr %a, align 4
   %a.conv = zext i32 %a.val to i64
   ret i64 %a.conv
 }
@@ -282,7 +282,7 @@ define i64 @func35() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i32, align 4
-  %a.val = load i32, i32* %a, align 4
+  %a.val = load i32, ptr %a, align 4
   %a.conv = zext i32 %a.val to i64
   ret i64 %a.conv
 }
@@ -296,7 +296,7 @@ define signext i8 @func37() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i1, align 1
-  %a.val = load i1, i1* %a, align 1
+  %a.val = load i1, ptr %a, align 1
   %a.conv = sext i1 %a.val to i8
   ret i8 %a.conv
 }
@@ -310,7 +310,7 @@ define signext i16 @func38() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i1, align 1
-  %a.val = load i1, i1* %a, align 1
+  %a.val = load i1, ptr %a, align 1
   %a.conv = sext i1 %a.val to i16
   ret i16 %a.conv
 }
@@ -324,7 +324,7 @@ define signext i32 @func39() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i1, align 1
-  %a.val = load i1, i1* %a, align 1
+  %a.val = load i1, ptr %a, align 1
   %a.conv = sext i1 %a.val to i32
   ret i32 %a.conv
 }
@@ -338,7 +338,7 @@ define signext i64 @func40() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i1, align 1
-  %a.val = load i1, i1* %a, align 1
+  %a.val = load i1, ptr %a, align 1
   %a.conv = sext i1 %a.val to i64
   ret i64 %a.conv
 }
@@ -350,7 +350,7 @@ define signext i8 @func42() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i1, align 1
-  %a.val = load i1, i1* %a, align 1
+  %a.val = load i1, ptr %a, align 1
   %a.conv = zext i1 %a.val to i8
   ret i8 %a.conv
 }
@@ -362,7 +362,7 @@ define signext i16 @func43() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i1, align 1
-  %a.val = load i1, i1* %a, align 1
+  %a.val = load i1, ptr %a, align 1
   %a.conv = zext i1 %a.val to i16
   ret i16 %a.conv
 }
@@ -374,7 +374,7 @@ define signext i32 @func44() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i1, align 1
-  %a.val = load i1, i1* %a, align 1
+  %a.val = load i1, ptr %a, align 1
   %a.conv = zext i1 %a.val to i32
   ret i32 %a.conv
 }
@@ -386,7 +386,7 @@ define signext i64 @func45() {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %a = alloca i1, align 1
-  %a.val = load i1, i1* %a, align 1
+  %a.val = load i1, ptr %a, align 1
   %a.conv = zext i1 %a.val to i64
   ret i64 %a.conv
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/stackframe_align.ll b/llvm/test/CodeGen/VE/Scalar/stackframe_align.ll
index 7270bf0b9e84d..d90c0bcf9f837 100644
--- a/llvm/test/CodeGen/VE/Scalar/stackframe_align.ll
+++ b/llvm/test/CodeGen/VE/Scalar/stackframe_align.ll
@@ -9,7 +9,7 @@
 ;; Allocated buffer places from 9 to 15 bytes in 16 bytes local vars area.
 
 ; Function Attrs: nounwind
-define i8* @test_frame7(i8* %0) {
+define ptr @test_frame7(ptr %0) {
 ; CHECK-LABEL: test_frame7:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    adds.l %s11, -16, %s11
@@ -55,17 +55,16 @@ define i8* @test_frame7(i8* %0) {
 ; CHECKFP-NEXT:    ld %s9, (, %s11)
 ; CHECKFP-NEXT:    b.l.t (, %s10)
   %2 = alloca [7 x i8], align 1
-  %3 = getelementptr inbounds [7 x i8], [7 x i8]* %2, i64 0, i64 0
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  ret i8* %3
+  %3 = load i8, ptr %0, align 1
+  store i8 %3, ptr %2, align 1
+  ret ptr %2
 }
 
 ;; Allocated buffer is aligned by 8, so it places from 8 to 14 bytes in 16
 ;; bytes local vars area.
 
 ; Function Attrs: nounwind
-define i8* @test_frame7_align8(i8* %0) {
+define ptr @test_frame7_align8(ptr %0) {
 ; CHECK-LABEL: test_frame7_align8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    adds.l %s11, -16, %s11
@@ -111,17 +110,16 @@ define i8* @test_frame7_align8(i8* %0) {
 ; CHECKFP-NEXT:    ld %s9, (, %s11)
 ; CHECKFP-NEXT:    b.l.t (, %s10)
   %2 = alloca [7 x i8], align 8
-  %3 = getelementptr inbounds [7 x i8], [7 x i8]* %2, i64 0, i64 0
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  ret i8* %3
+  %3 = load i8, ptr %0, align 1
+  store i8 %3, ptr %2, align 1
+  ret ptr %2
 }
 
 ;; Allocated buffer is aligned by 16, so it places from 0 to 15 bytes in 16
 ;; bytes local vars area.
 
 ; Function Attrs: nounwind
-define i8* @test_frame16_align16(i8* %0) {
+define ptr @test_frame16_align16(ptr %0) {
 ; CHECK-LABEL: test_frame16_align16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    adds.l %s11, -16, %s11
@@ -167,10 +165,9 @@ define i8* @test_frame16_align16(i8* %0) {
 ; CHECKFP-NEXT:    ld %s9, (, %s11)
 ; CHECKFP-NEXT:    b.l.t (, %s10)
   %2 = alloca [16 x i8], align 16
-  %3 = getelementptr inbounds [16 x i8], [16 x i8]* %2, i64 0, i64 0
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  ret i8* %3
+  %3 = load i8, ptr %0, align 1
+  store i8 %3, ptr %2, align 1
+  ret ptr %2
 }
 
 ;; Allocated buffer is aligned by 32, so it places from 0 to 15 bytes in 48
@@ -178,7 +175,7 @@ define i8* @test_frame16_align16(i8* %0) {
 ;; 207 bytes in 224 + alpha allocated local vars area.
 
 ; Function Attrs: nounwind
-define i8* @test_frame16_align32(i8* %0) {
+define ptr @test_frame16_align32(ptr %0) {
 ; CHECK-LABEL: test_frame16_align32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -231,10 +228,9 @@ define i8* @test_frame16_align32(i8* %0) {
 ; CHECKFP-NEXT:    ld %s9, (, %s11)
 ; CHECKFP-NEXT:    b.l.t (, %s10)
   %2 = alloca [16 x i8], align 32
-  %3 = getelementptr inbounds [16 x i8], [16 x i8]* %2, i64 0, i64 0
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  ret i8* %3
+  %3 = load i8, ptr %0, align 1
+  store i8 %3, ptr %2, align 1
+  ret ptr %2
 }
 
 ;; Allocated buffer is aligned by 32, so it places from 0 to 31 bytes in 48
@@ -242,7 +238,7 @@ define i8* @test_frame16_align32(i8* %0) {
 ;; to 223 in 224 + alpha bytes local vars area..
 
 ; Function Attrs: nounwind
-define i8* @test_frame32_align32(i8* %0) {
+define ptr @test_frame32_align32(ptr %0) {
 ; CHECK-LABEL: test_frame32_align32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -295,10 +291,9 @@ define i8* @test_frame32_align32(i8* %0) {
 ; CHECKFP-NEXT:    ld %s9, (, %s11)
 ; CHECKFP-NEXT:    b.l.t (, %s10)
   %2 = alloca [32 x i8], align 32
-  %3 = getelementptr inbounds [32 x i8], [32 x i8]* %2, i64 0, i64 0
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  ret i8* %3
+  %3 = load i8, ptr %0, align 1
+  store i8 %3, ptr %2, align 1
+  ret ptr %2
 }
 
 ;; Dynamically allocated buffer is aligned by 16, so it places from 0 to 31
@@ -310,7 +305,7 @@ define i8* @test_frame32_align32(i8* %0) {
 ;; FIXME: (size+15)/16*16 is not enough.
 
 ; Function Attrs: nounwind
-define i8* @test_frame_dynalign16(i8* %0, i64 %1) {
+define ptr @test_frame_dynalign16(ptr %0, i64 %1) {
 ; CHECK-LABEL: test_frame_dynalign16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -375,9 +370,9 @@ define i8* @test_frame_dynalign16(i8* %0, i64 %1) {
 ; CHECKFP-NEXT:    ld %s9, (, %s11)
 ; CHECKFP-NEXT:    b.l.t (, %s10)
   %3 = alloca i8, i64 %1, align 16
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  ret i8* %3
+  %4 = load i8, ptr %0, align 1
+  store i8 %4, ptr %3, align 1
+  ret ptr %3
 }
 
 ;; This test allocates static buffer with 16 bytes align and dynamic buffer
@@ -391,7 +386,7 @@ define i8* @test_frame_dynalign16(i8* %0, i64 %1) {
 ;; address between 240 and 271 from SP.
 
 ; Function Attrs: nounwind
-define i8* @test_frame16_align16_dynalign32(i8* %0, i64 %n) {
+define ptr @test_frame16_align16_dynalign32(ptr %0, i64 %n) {
 ; CHECK-LABEL: test_frame16_align16_dynalign32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -472,12 +467,11 @@ define i8* @test_frame16_align16_dynalign32(i8* %0, i64 %n) {
 ; CHECKFP-NEXT:    ld %s9, (, %s11)
 ; CHECKFP-NEXT:    b.l.t (, %s10)
   %2 = alloca [16 x i8], align 16
-  %3 = getelementptr inbounds [16 x i8], [16 x i8]* %2, i64 0, i64 0
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  %5 = alloca i8, i64 %n, align 32
-  %6 = load i8, i8* %3, align 1
-  store i8 %6, i8* %5, align 1
-  ret i8* %5
+  %3 = load i8, ptr %0, align 1
+  store i8 %3, ptr %2, align 1
+  %4 = alloca i8, i64 %n, align 32
+  %5 = load i8, ptr %2, align 1
+  store i8 %5, ptr %4, align 1
+  ret ptr %4
 }
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/stackframe_call.ll b/llvm/test/CodeGen/VE/Scalar/stackframe_call.ll
index 560502611add1..3a3b1ba1544c4 100644
--- a/llvm/test/CodeGen/VE/Scalar/stackframe_call.ll
+++ b/llvm/test/CodeGen/VE/Scalar/stackframe_call.ll
@@ -12,7 +12,7 @@
 @data = external global i8, align 1
 
 ; Function Attrs: nounwind
-define i8* @test_frame0(i8* %0, i8* %1) {
+define ptr @test_frame0(ptr %0, ptr %1) {
 ; CHECK-LABEL: test_frame0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -73,14 +73,14 @@ define i8* @test_frame0(i8* %0, i8* %1) {
 ; PIC-NEXT:    ld %s10, 8(, %s11)
 ; PIC-NEXT:    ld %s9, (, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
-  %3 = tail call i8* @fun(i8* %0, i8* %1)
-  ret i8* %3
+  %3 = tail call ptr @fun(ptr %0, ptr %1)
+  ret ptr %3
 }
 
-declare i8* @fun(i8*, i8*)
+declare ptr @fun(ptr, ptr)
 
 ; Function Attrs: nounwind
-define i8* @test_frame32(i8* %0) {
+define ptr @test_frame32(ptr %0) {
 ; CHECK-LABEL: test_frame32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -146,21 +146,20 @@ define i8* @test_frame32(i8* %0) {
 ; PIC-NEXT:    ld %s9, (, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
   %2 = alloca [32 x i8], align 1
-  %3 = getelementptr inbounds [32 x i8], [32 x i8]* %2, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* nonnull %3)
-  %4 = call i8* @fun(i8* nonnull %3, i8* %0)
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* nonnull %3)
-  ret i8* %4
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %2)
+  %3 = call ptr @fun(ptr nonnull %2, ptr %0)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %2)
+  ret ptr %3
 }
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 ; Function Attrs: nounwind
-define i8* @test_align32(i32 signext %0, i8* nocapture readnone %1) {
+define ptr @test_align32(i32 signext %0, ptr nocapture readnone %1) {
 ; CHECK-LABEL: test_align32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -253,17 +252,16 @@ define i8* @test_align32(i32 signext %0, i8* nocapture readnone %1) {
 ; PIC-NEXT:    ld %s9, (, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
   %3 = alloca [32 x i8], align 32
-  %4 = getelementptr inbounds [32 x i8], [32 x i8]* %3, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* nonnull %4)
-  %5 = sext i32 %0 to i64
-  %6 = alloca i8, i64 %5, align 32
-  %7 = call i8* @fun(i8* nonnull %4, i8* nonnull %6)
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* nonnull %4)
-  ret i8* %7
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3)
+  %4 = sext i32 %0 to i64
+  %5 = alloca i8, i64 %4, align 32
+  %6 = call ptr @fun(ptr nonnull %3, ptr nonnull %5)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3)
+  ret ptr %6
 }
 
 ; Function Attrs: nounwind
-define i8* @test_frame0_var(i8* %0, i8* %1) {
+define ptr @test_frame0_var(ptr %0, ptr %1) {
 ; CHECK-LABEL: test_frame0_var:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -335,14 +333,14 @@ define i8* @test_frame0_var(i8* %0, i8* %1) {
 ; PIC-NEXT:    ld %s10, 8(, %s11)
 ; PIC-NEXT:    ld %s9, (, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
-  %3 = load i8, i8* @data, align 1
-  store i8 %3, i8* %0, align 1
-  %4 = tail call i8* @fun(i8* nonnull %0, i8* %1)
-  ret i8* %4
+  %3 = load i8, ptr @data, align 1
+  store i8 %3, ptr %0, align 1
+  %4 = tail call ptr @fun(ptr nonnull %0, ptr %1)
+  ret ptr %4
 }
 
 ; Function Attrs: nounwind
-define i8* @test_frame32_var(i8* %0) {
+define ptr @test_frame32_var(ptr %0) {
 ; CHECK-LABEL: test_frame32_var:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -419,17 +417,16 @@ define i8* @test_frame32_var(i8* %0) {
 ; PIC-NEXT:    ld %s9, (, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
   %2 = alloca [32 x i8], align 1
-  %3 = getelementptr inbounds [32 x i8], [32 x i8]* %2, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* nonnull %3)
-  %4 = load i8, i8* @data, align 1
-  store i8 %4, i8* %3, align 1
-  %5 = call i8* @fun(i8* nonnull %3, i8* %0)
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* nonnull %3)
-  ret i8* %5
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %2)
+  %3 = load i8, ptr @data, align 1
+  store i8 %3, ptr %2, align 1
+  %4 = call ptr @fun(ptr nonnull %2, ptr %0)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %2)
+  ret ptr %4
 }
 
 ; Function Attrs: nounwind
-define i8* @test_align32_var(i32 signext %0, i8* nocapture readnone %1) {
+define ptr @test_align32_var(i32 signext %0, ptr nocapture readnone %1) {
 ; CHECK-LABEL: test_align32_var:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -533,13 +530,12 @@ define i8* @test_align32_var(i32 signext %0, i8* nocapture readnone %1) {
 ; PIC-NEXT:    ld %s9, (, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
   %3 = alloca [32 x i8], align 32
-  %4 = getelementptr inbounds [32 x i8], [32 x i8]* %3, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* nonnull %4)
-  %5 = sext i32 %0 to i64
-  %6 = alloca i8, i64 %5, align 32
-  %7 = load i8, i8* @data, align 1
-  store i8 %7, i8* %6, align 32
-  %8 = call i8* @fun(i8* nonnull %4, i8* nonnull %6)
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* nonnull %4)
-  ret i8* %8
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3)
+  %4 = sext i32 %0 to i64
+  %5 = alloca i8, i64 %4, align 32
+  %6 = load i8, ptr @data, align 1
+  store i8 %6, ptr %5, align 32
+  %7 = call ptr @fun(ptr nonnull %3, ptr nonnull %5)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3)
+  ret ptr %7
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/stackframe_nocall.ll b/llvm/test/CodeGen/VE/Scalar/stackframe_nocall.ll
index 47eb8732ed981..f9308a172ad05 100644
--- a/llvm/test/CodeGen/VE/Scalar/stackframe_nocall.ll
+++ b/llvm/test/CodeGen/VE/Scalar/stackframe_nocall.ll
@@ -12,7 +12,7 @@
 @data = external global i8, align 1
 
 ; Function Attrs: norecurse nounwind readnone
-define i8* @test_frame0(i8* nocapture readnone %0, i8* readnone returned %1) {
+define ptr @test_frame0(ptr nocapture readnone %0, ptr readnone returned %1) {
 ; CHECK-LABEL: test_frame0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    or %s0, 0, %s1
@@ -22,11 +22,11 @@ define i8* @test_frame0(i8* nocapture readnone %0, i8* readnone returned %1) {
 ; PIC:       # %bb.0:
 ; PIC-NEXT:    or %s0, 0, %s1
 ; PIC-NEXT:    b.l.t (, %s10)
-  ret i8* %1
+  ret ptr %1
 }
 
 ; Function Attrs: nofree nounwind
-define nonnull i8* @test_frame32(i8* nocapture readonly %0) {
+define nonnull ptr @test_frame32(ptr nocapture readonly %0) {
 ; CHECK-LABEL: test_frame32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    adds.l %s11, -32, %s11
@@ -67,22 +67,21 @@ define nonnull i8* @test_frame32(i8* nocapture readonly %0) {
 ; PIC-NEXT:    adds.l %s11, 32, %s11
 ; PIC-NEXT:    b.l.t (, %s10)
   %2 = alloca [32 x i8], align 1
-  %3 = getelementptr inbounds [32 x i8], [32 x i8]* %2, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* nonnull %3)
-  %4 = load i8, i8* %0, align 1
-  store volatile i8 %4, i8* %3, align 1
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* nonnull %3)
-  ret i8* %3
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %2)
+  %3 = load i8, ptr %0, align 1
+  store volatile i8 %3, ptr %2, align 1
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %2)
+  ret ptr %2
 }
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 ; Function Attrs: nofree nounwind
-define noalias nonnull i8* @test_align32(i32 signext %0, i8* nocapture readonly %1) {
+define noalias nonnull ptr @test_align32(i32 signext %0, ptr nocapture readonly %1) {
 ; CHECK-LABEL: test_align32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -172,18 +171,17 @@ define noalias nonnull i8* @test_align32(i32 signext %0, i8* nocapture readonly
 ; PIC-NEXT:    ld %s9, (, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
   %3 = alloca [32 x i8], align 32
-  %4 = getelementptr inbounds [32 x i8], [32 x i8]* %3, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* nonnull %4)
-  %5 = sext i32 %0 to i64
-  %6 = alloca i8, i64 %5, align 32
-  %7 = load i8, i8* %1, align 1
-  store volatile i8 %7, i8* %6, align 32
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* nonnull %4)
-  ret i8* %4
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3)
+  %4 = sext i32 %0 to i64
+  %5 = alloca i8, i64 %4, align 32
+  %6 = load i8, ptr %1, align 1
+  store volatile i8 %6, ptr %5, align 32
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3)
+  ret ptr %3
 }
 
 ; Function Attrs: nofree norecurse nounwind
-define i8* @test_frame0_var(i8* returned %0, i8* nocapture readnone %1) {
+define ptr @test_frame0_var(ptr returned %0, ptr nocapture readnone %1) {
 ; CHECK-LABEL: test_frame0_var:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, data@lo
@@ -210,13 +208,13 @@ define i8* @test_frame0_var(i8* returned %0, i8* nocapture readnone %1) {
 ; PIC-NEXT:    ld %s16, 32(, %s11)
 ; PIC-NEXT:    ld %s15, 24(, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
-  %3 = load i8, i8* @data, align 1
-  store i8 %3, i8* %0, align 1
-  ret i8* %0
+  %3 = load i8, ptr @data, align 1
+  store i8 %3, ptr %0, align 1
+  ret ptr %0
 }
 
 ; Function Attrs: nofree nounwind
-define nonnull i8* @test_frame32_var(i8* nocapture readnone %0) {
+define nonnull ptr @test_frame32_var(ptr nocapture readnone %0) {
 ; CHECK-LABEL: test_frame32_var:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    adds.l %s11, -32, %s11
@@ -272,16 +270,15 @@ define nonnull i8* @test_frame32_var(i8* nocapture readnone %0) {
 ; PIC-NEXT:    ld %s15, 24(, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
   %2 = alloca [32 x i8], align 1
-  %3 = getelementptr inbounds [32 x i8], [32 x i8]* %2, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* nonnull %3)
-  %4 = load i8, i8* @data, align 1
-  store volatile i8 %4, i8* %3, align 1
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* nonnull %3)
-  ret i8* %3
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %2)
+  %3 = load i8, ptr @data, align 1
+  store volatile i8 %3, ptr %2, align 1
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %2)
+  ret ptr %2
 }
 
 ; Function Attrs: nofree nounwind
-define noalias nonnull i8* @test_align32_var(i32 signext %0, i8* nocapture readonly %1) {
+define noalias nonnull ptr @test_align32_var(i32 signext %0, ptr nocapture readonly %1) {
 ; CHECK-LABEL: test_align32_var:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s9, (, %s11)
@@ -371,12 +368,11 @@ define noalias nonnull i8* @test_align32_var(i32 signext %0, i8* nocapture reado
 ; PIC-NEXT:    ld %s9, (, %s11)
 ; PIC-NEXT:    b.l.t (, %s10)
   %3 = alloca [32 x i8], align 32
-  %4 = getelementptr inbounds [32 x i8], [32 x i8]* %3, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* nonnull %4)
-  %5 = sext i32 %0 to i64
-  %6 = alloca i8, i64 %5, align 32
-  %7 = load i8, i8* %1, align 1
-  store volatile i8 %7, i8* %6, align 32
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* nonnull %4)
-  ret i8* %4
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3)
+  %4 = sext i32 %0 to i64
+  %5 = alloca i8, i64 %4, align 32
+  %6 = load i8, ptr %1, align 1
+  store volatile i8 %6, ptr %5, align 32
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3)
+  ret ptr %3
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/stackframe_size.ll b/llvm/test/CodeGen/VE/Scalar/stackframe_size.ll
index 0d40b3bed71be..f0368b99bfa7d 100644
--- a/llvm/test/CodeGen/VE/Scalar/stackframe_size.ll
+++ b/llvm/test/CodeGen/VE/Scalar/stackframe_size.ll
@@ -16,7 +16,7 @@ define signext i32 @test_frame0(i32 signext %0) {
 }
 
 ; Function Attrs: nounwind
-define i8* @test_frame8(i8* %0) {
+define ptr @test_frame8(ptr %0) {
 ; CHECK-LABEL: test_frame8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    adds.l %s11, -16, %s11
@@ -37,14 +37,13 @@ define i8* @test_frame8(i8* %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca [8 x i8], align 1
-  %3 = getelementptr inbounds [8 x i8], [8 x i8]* %2, i64 0, i64 0
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  ret i8* %3
+  %3 = load i8, ptr %0, align 1
+  store i8 %3, ptr %2, align 1
+  ret ptr %2
 }
 
 ; Function Attrs: nounwind
-define i8* @test_frame16(i8* %0) {
+define ptr @test_frame16(ptr %0) {
 ; CHECK-LABEL: test_frame16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    adds.l %s11, -16, %s11
@@ -65,14 +64,13 @@ define i8* @test_frame16(i8* %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca [16 x i8], align 1
-  %3 = getelementptr inbounds [16 x i8], [16 x i8]* %2, i64 0, i64 0
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  ret i8* %3
+  %3 = load i8, ptr %0, align 1
+  store i8 %3, ptr %2, align 1
+  ret ptr %2
 }
 
 ; Function Attrs: nounwind
-define i8* @test_frame32(i8* %0) {
+define ptr @test_frame32(ptr %0) {
 ; CHECK-LABEL: test_frame32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    adds.l %s11, -32, %s11
@@ -93,14 +91,13 @@ define i8* @test_frame32(i8* %0) {
 ; CHECK-NEXT:    adds.l %s11, 32, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca [32 x i8], align 1
-  %3 = getelementptr inbounds [32 x i8], [32 x i8]* %2, i64 0, i64 0
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  ret i8* %3
+  %3 = load i8, ptr %0, align 1
+  store i8 %3, ptr %2, align 1
+  ret ptr %2
 }
 
 ; Function Attrs: nounwind
-define i8* @test_frame64(i8* %0) {
+define ptr @test_frame64(ptr %0) {
 ; CHECK-LABEL: test_frame64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    adds.l %s11, -64, %s11
@@ -121,14 +118,13 @@ define i8* @test_frame64(i8* %0) {
 ; CHECK-NEXT:    lea %s11, 64(, %s11)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca [64 x i8], align 1
-  %3 = getelementptr inbounds [64 x i8], [64 x i8]* %2, i64 0, i64 0
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  ret i8* %3
+  %3 = load i8, ptr %0, align 1
+  store i8 %3, ptr %2, align 1
+  ret ptr %2
 }
 
 ; Function Attrs: nounwind
-define i8* @test_frame128(i8* %0) {
+define ptr @test_frame128(ptr %0) {
 ; CHECK-LABEL: test_frame128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s11, -128(, %s11)
@@ -149,14 +145,13 @@ define i8* @test_frame128(i8* %0) {
 ; CHECK-NEXT:    lea %s11, 128(, %s11)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca [128 x i8], align 1
-  %3 = getelementptr inbounds [128 x i8], [128 x i8]* %2, i64 0, i64 0
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  ret i8* %3
+  %3 = load i8, ptr %0, align 1
+  store i8 %3, ptr %2, align 1
+  ret ptr %2
 }
 
 ; Function Attrs: nounwind
-define i8* @test_frame65536(i8* %0) {
+define ptr @test_frame65536(ptr %0) {
 ; CHECK-LABEL: test_frame65536:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s11, -65536(, %s11)
@@ -177,14 +172,13 @@ define i8* @test_frame65536(i8* %0) {
 ; CHECK-NEXT:    lea %s11, 65536(, %s11)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca [65536 x i8], align 1
-  %3 = getelementptr inbounds [65536 x i8], [65536 x i8]* %2, i64 0, i64 0
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  ret i8* %3
+  %3 = load i8, ptr %0, align 1
+  store i8 %3, ptr %2, align 1
+  ret ptr %2
 }
 
 ; Function Attrs: nounwind
-define i8* @test_frame4294967296(i8* %0) {
+define ptr @test_frame4294967296(ptr %0) {
 ; CHECK-LABEL: test_frame4294967296:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s13, 0
@@ -209,8 +203,7 @@ define i8* @test_frame4294967296(i8* %0) {
 ; CHECK-NEXT:    lea.sl %s11, 1(%s13, %s11)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %2 = alloca [4294967296 x i8], align 1
-  %3 = getelementptr inbounds [4294967296 x i8], [4294967296 x i8]* %2, i64 0, i64 0
-  %4 = load i8, i8* %0, align 1
-  store i8 %4, i8* %3, align 1
-  ret i8* %3
+  %3 = load i8, ptr %0, align 1
+  store i8 %3, ptr %2, align 1
+  ret ptr %2
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/stacksave.ll b/llvm/test/CodeGen/VE/Scalar/stacksave.ll
index 336f9b83455fd..097046a0baea7 100644
--- a/llvm/test/CodeGen/VE/Scalar/stacksave.ll
+++ b/llvm/test/CodeGen/VE/Scalar/stacksave.ll
@@ -1,26 +1,26 @@
 ; RUN: llc < %s -mtriple=ve | FileCheck %s
 
 ; Function Attrs: noinline nounwind optnone
-define i8* @stacksave() {
+define ptr @stacksave() {
 ; CHECK-LABEL: stacksave:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s0, 0, %s11
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %ret = call i8* @llvm.stacksave()
-  ret i8* %ret
+  %ret = call ptr @llvm.stacksave()
+  ret ptr %ret
 }
 
 ; Function Attrs: noinline nounwind optnone
-define void @stackrestore(i8* %ptr) {
+define void @stackrestore(ptr %ptr) {
 ; CHECK-LABEL: stackrestore:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    or %s11, 0, %s0
 ; CHECK-NEXT:    or %s11, 0, %s9
-  call void @llvm.stackrestore(i8* %ptr)
+  call void @llvm.stackrestore(ptr %ptr)
   ret void
 }
 
 ; Function Attrs: nounwind
-declare i8* @llvm.stacksave()
+declare ptr @llvm.stacksave()
 ; Function Attrs: nounwind
-declare void @llvm.stackrestore(i8*)
+declare void @llvm.stackrestore(ptr)

diff  --git a/llvm/test/CodeGen/VE/Scalar/store-align1.ll b/llvm/test/CodeGen/VE/Scalar/store-align1.ll
index fa8076aa12565..83df8d4fa04c0 100644
--- a/llvm/test/CodeGen/VE/Scalar/store-align1.ll
+++ b/llvm/test/CodeGen/VE/Scalar/store-align1.ll
@@ -15,7 +15,7 @@ define void @storef64stk(double %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca double, align 1
-  store double %0, double* %addr, align 1
+  store double %0, ptr %addr, align 1
   ret void
 }
 
@@ -27,7 +27,7 @@ define void @storef32stk(float %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca float, align 1
-  store float %0, float* %addr, align 1
+  store float %0, ptr %addr, align 1
   ret void
 }
 
@@ -39,7 +39,7 @@ define void @storei64stk(i64 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i64, align 1
-  store i64 %0, i64* %addr, align 1
+  store i64 %0, ptr %addr, align 1
   ret void
 }
 
@@ -51,7 +51,7 @@ define void @storei32stk(i32 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i32, align 1
-  store i32 %0, i32* %addr, align 1
+  store i32 %0, ptr %addr, align 1
   ret void
 }
 
@@ -63,7 +63,7 @@ define void @storei16stk(i16 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i16, align 1
-  store i16 %0, i16* %addr, align 1
+  store i16 %0, ptr %addr, align 1
   ret void
 }
 
@@ -75,7 +75,7 @@ define void @storei8stk(i8 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i8, align 1
-  store i8 %0, i8* %addr, align 1
+  store i8 %0, ptr %addr, align 1
   ret void
 }
 
@@ -88,7 +88,7 @@ define void @storef64com(double %0) {
 ; CHECK-NEXT:    lea.sl %s1, vf64@hi(, %s1)
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store double %0, double* @vf64, align 1
+  store double %0, ptr @vf64, align 1
   ret void
 }
 
@@ -101,7 +101,7 @@ define void @storef32com(float %0) {
 ; CHECK-NEXT:    lea.sl %s1, vf32@hi(, %s1)
 ; CHECK-NEXT:    stu %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store float %0, float* @vf32, align 1
+  store float %0, ptr @vf32, align 1
   ret void
 }
 
@@ -114,7 +114,7 @@ define void @storei64com(i64 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi64@hi(, %s1)
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i64 %0, i64* @vi64, align 1
+  store i64 %0, ptr @vi64, align 1
   ret void
 }
 
@@ -127,7 +127,7 @@ define void @storei32com(i32 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi32@hi(, %s1)
 ; CHECK-NEXT:    stl %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i32 %0, i32* @vi32, align 1
+  store i32 %0, ptr @vi32, align 1
   ret void
 }
 
@@ -140,7 +140,7 @@ define void @storei16com(i16 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi16@hi(, %s1)
 ; CHECK-NEXT:    st2b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i16 %0, i16* @vi16, align 1
+  store i16 %0, ptr @vi16, align 1
   ret void
 }
 
@@ -153,7 +153,7 @@ define void @storei8com(i8 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi8@hi(, %s1)
 ; CHECK-NEXT:    st1b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i8 %0, i8* @vi8, align 1
+  store i8 %0, ptr @vi8, align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/store-align2.ll b/llvm/test/CodeGen/VE/Scalar/store-align2.ll
index 7b4596ce4ae52..72eaef254e155 100644
--- a/llvm/test/CodeGen/VE/Scalar/store-align2.ll
+++ b/llvm/test/CodeGen/VE/Scalar/store-align2.ll
@@ -15,7 +15,7 @@ define void @storef64stk(double %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca double, align 2
-  store double %0, double* %addr, align 2
+  store double %0, ptr %addr, align 2
   ret void
 }
 
@@ -27,7 +27,7 @@ define void @storef32stk(float %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca float, align 2
-  store float %0, float* %addr, align 2
+  store float %0, ptr %addr, align 2
   ret void
 }
 
@@ -39,7 +39,7 @@ define void @storei64stk(i64 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i64, align 2
-  store i64 %0, i64* %addr, align 2
+  store i64 %0, ptr %addr, align 2
   ret void
 }
 
@@ -51,7 +51,7 @@ define void @storei32stk(i32 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i32, align 2
-  store i32 %0, i32* %addr, align 2
+  store i32 %0, ptr %addr, align 2
   ret void
 }
 
@@ -63,7 +63,7 @@ define void @storei16stk(i16 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i16, align 2
-  store i16 %0, i16* %addr, align 2
+  store i16 %0, ptr %addr, align 2
   ret void
 }
 
@@ -75,7 +75,7 @@ define void @storei8stk(i8 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i8, align 2
-  store i8 %0, i8* %addr, align 2
+  store i8 %0, ptr %addr, align 2
   ret void
 }
 
@@ -88,7 +88,7 @@ define void @storef64com(double %0) {
 ; CHECK-NEXT:    lea.sl %s1, vf64@hi(, %s1)
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store double %0, double* @vf64, align 2
+  store double %0, ptr @vf64, align 2
   ret void
 }
 
@@ -101,7 +101,7 @@ define void @storef32com(float %0) {
 ; CHECK-NEXT:    lea.sl %s1, vf32@hi(, %s1)
 ; CHECK-NEXT:    stu %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store float %0, float* @vf32, align 2
+  store float %0, ptr @vf32, align 2
   ret void
 }
 
@@ -114,7 +114,7 @@ define void @storei64com(i64 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi64@hi(, %s1)
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i64 %0, i64* @vi64, align 2
+  store i64 %0, ptr @vi64, align 2
   ret void
 }
 
@@ -127,7 +127,7 @@ define void @storei32com(i32 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi32@hi(, %s1)
 ; CHECK-NEXT:    stl %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i32 %0, i32* @vi32, align 2
+  store i32 %0, ptr @vi32, align 2
   ret void
 }
 
@@ -140,7 +140,7 @@ define void @storei16com(i16 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi16@hi(, %s1)
 ; CHECK-NEXT:    st2b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i16 %0, i16* @vi16, align 2
+  store i16 %0, ptr @vi16, align 2
   ret void
 }
 
@@ -153,7 +153,7 @@ define void @storei8com(i8 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi8@hi(, %s1)
 ; CHECK-NEXT:    st1b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i8 %0, i8* @vi8, align 2
+  store i8 %0, ptr @vi8, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/store-align4.ll b/llvm/test/CodeGen/VE/Scalar/store-align4.ll
index 153c219343cf8..d20a677fb7016 100644
--- a/llvm/test/CodeGen/VE/Scalar/store-align4.ll
+++ b/llvm/test/CodeGen/VE/Scalar/store-align4.ll
@@ -15,7 +15,7 @@ define void @storef64stk(double %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca double, align 4
-  store double %0, double* %addr, align 4
+  store double %0, ptr %addr, align 4
   ret void
 }
 
@@ -27,7 +27,7 @@ define void @storef32stk(float %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca float, align 4
-  store float %0, float* %addr, align 4
+  store float %0, ptr %addr, align 4
   ret void
 }
 
@@ -39,7 +39,7 @@ define void @storei64stk(i64 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i64, align 4
-  store i64 %0, i64* %addr, align 4
+  store i64 %0, ptr %addr, align 4
   ret void
 }
 
@@ -51,7 +51,7 @@ define void @storei32stk(i32 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i32, align 4
-  store i32 %0, i32* %addr, align 4
+  store i32 %0, ptr %addr, align 4
   ret void
 }
 
@@ -63,7 +63,7 @@ define void @storei16stk(i16 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i16, align 4
-  store i16 %0, i16* %addr, align 4
+  store i16 %0, ptr %addr, align 4
   ret void
 }
 
@@ -75,7 +75,7 @@ define void @storei8stk(i8 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i8, align 4
-  store i8 %0, i8* %addr, align 4
+  store i8 %0, ptr %addr, align 4
   ret void
 }
 
@@ -88,7 +88,7 @@ define void @storef64com(double %0) {
 ; CHECK-NEXT:    lea.sl %s1, vf64@hi(, %s1)
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store double %0, double* @vf64, align 4
+  store double %0, ptr @vf64, align 4
   ret void
 }
 
@@ -101,7 +101,7 @@ define void @storef32com(float %0) {
 ; CHECK-NEXT:    lea.sl %s1, vf32@hi(, %s1)
 ; CHECK-NEXT:    stu %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store float %0, float* @vf32, align 4
+  store float %0, ptr @vf32, align 4
   ret void
 }
 
@@ -114,7 +114,7 @@ define void @storei64com(i64 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi64@hi(, %s1)
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i64 %0, i64* @vi64, align 4
+  store i64 %0, ptr @vi64, align 4
   ret void
 }
 
@@ -127,7 +127,7 @@ define void @storei32com(i32 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi32@hi(, %s1)
 ; CHECK-NEXT:    stl %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i32 %0, i32* @vi32, align 4
+  store i32 %0, ptr @vi32, align 4
   ret void
 }
 
@@ -140,7 +140,7 @@ define void @storei16com(i16 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi16@hi(, %s1)
 ; CHECK-NEXT:    st2b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i16 %0, i16* @vi16, align 4
+  store i16 %0, ptr @vi16, align 4
   ret void
 }
 
@@ -153,7 +153,7 @@ define void @storei8com(i8 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi8@hi(, %s1)
 ; CHECK-NEXT:    st1b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i8 %0, i8* @vi8, align 4
+  store i8 %0, ptr @vi8, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/store-align8.ll b/llvm/test/CodeGen/VE/Scalar/store-align8.ll
index 4d166cc5e5ed3..5efa01e2a9f66 100644
--- a/llvm/test/CodeGen/VE/Scalar/store-align8.ll
+++ b/llvm/test/CodeGen/VE/Scalar/store-align8.ll
@@ -15,7 +15,7 @@ define void @storef64stk(double %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca double, align 8
-  store double %0, double* %addr, align 8
+  store double %0, ptr %addr, align 8
   ret void
 }
 
@@ -27,7 +27,7 @@ define void @storef32stk(float %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca float, align 8
-  store float %0, float* %addr, align 8
+  store float %0, ptr %addr, align 8
   ret void
 }
 
@@ -39,7 +39,7 @@ define void @storei64stk(i64 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i64, align 8
-  store i64 %0, i64* %addr, align 8
+  store i64 %0, ptr %addr, align 8
   ret void
 }
 
@@ -51,7 +51,7 @@ define void @storei32stk(i32 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i32, align 8
-  store i32 %0, i32* %addr, align 8
+  store i32 %0, ptr %addr, align 8
   ret void
 }
 
@@ -63,7 +63,7 @@ define void @storei16stk(i16 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i16, align 8
-  store i16 %0, i16* %addr, align 8
+  store i16 %0, ptr %addr, align 8
   ret void
 }
 
@@ -75,7 +75,7 @@ define void @storei8stk(i8 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i8, align 8
-  store i8 %0, i8* %addr, align 8
+  store i8 %0, ptr %addr, align 8
   ret void
 }
 
@@ -88,7 +88,7 @@ define void @storef64com(double %0) {
 ; CHECK-NEXT:    lea.sl %s1, vf64@hi(, %s1)
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store double %0, double* @vf64, align 8
+  store double %0, ptr @vf64, align 8
   ret void
 }
 
@@ -101,7 +101,7 @@ define void @storef32com(float %0) {
 ; CHECK-NEXT:    lea.sl %s1, vf32@hi(, %s1)
 ; CHECK-NEXT:    stu %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store float %0, float* @vf32, align 8
+  store float %0, ptr @vf32, align 8
   ret void
 }
 
@@ -114,7 +114,7 @@ define void @storei64com(i64 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi64@hi(, %s1)
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i64 %0, i64* @vi64, align 8
+  store i64 %0, ptr @vi64, align 8
   ret void
 }
 
@@ -127,7 +127,7 @@ define void @storei32com(i32 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi32@hi(, %s1)
 ; CHECK-NEXT:    stl %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i32 %0, i32* @vi32, align 8
+  store i32 %0, ptr @vi32, align 8
   ret void
 }
 
@@ -140,7 +140,7 @@ define void @storei16com(i16 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi16@hi(, %s1)
 ; CHECK-NEXT:    st2b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i16 %0, i16* @vi16, align 8
+  store i16 %0, ptr @vi16, align 8
   ret void
 }
 
@@ -153,7 +153,7 @@ define void @storei8com(i8 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi8@hi(, %s1)
 ; CHECK-NEXT:    st1b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i8 %0, i8* @vi8, align 8
+  store i8 %0, ptr @vi8, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/store.ll b/llvm/test/CodeGen/VE/Scalar/store.ll
index 748607d1b3585..441fc6d1a384b 100644
--- a/llvm/test/CodeGen/VE/Scalar/store.ll
+++ b/llvm/test/CodeGen/VE/Scalar/store.ll
@@ -1,117 +1,117 @@
 ; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
 
 ; Function Attrs: norecurse nounwind readonly
-define void @storef128(fp128* nocapture %0, fp128 %1) {
+define void @storef128(ptr nocapture %0, fp128 %1) {
 ; CHECK-LABEL: storef128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s2, 8(, %s0)
 ; CHECK-NEXT:    st %s3, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store fp128 %1, fp128* %0, align 16
+  store fp128 %1, ptr %0, align 16
   ret void
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define void @storef64(double* nocapture %0, double %1) {
+define void @storef64(ptr nocapture %0, double %1) {
 ; CHECK-LABEL: storef64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store double %1, double* %0, align 16
+  store double %1, ptr %0, align 16
   ret void
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define void @storef32(float* nocapture %0, float %1) {
+define void @storef32(ptr nocapture %0, float %1) {
 ; CHECK-LABEL: storef32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stu %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store float %1, float* %0, align 16
+  store float %1, ptr %0, align 16
   ret void
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define void @storei128(i128* nocapture %0, i128 %1) {
+define void @storei128(ptr nocapture %0, i128 %1) {
 ; CHECK-LABEL: storei128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s2, 8(, %s0)
 ; CHECK-NEXT:    st %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i128 %1, i128* %0, align 16
+  store i128 %1, ptr %0, align 16
   ret void
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define void @storei64(i64* nocapture %0, i64 %1) {
+define void @storei64(ptr nocapture %0, i64 %1) {
 ; CHECK-LABEL: storei64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i64 %1, i64* %0, align 16
+  store i64 %1, ptr %0, align 16
   ret void
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define void @storei32(i32* nocapture %0, i32 %1) {
+define void @storei32(ptr nocapture %0, i32 %1) {
 ; CHECK-LABEL: storei32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stl %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i32 %1, i32* %0, align 16
+  store i32 %1, ptr %0, align 16
   ret void
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define void @storei32tr(i32* nocapture %0, i64 %1) {
+define void @storei32tr(ptr nocapture %0, i64 %1) {
 ; CHECK-LABEL: storei32tr:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stl %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = trunc i64 %1 to i32
-  store i32 %3, i32* %0, align 16
+  store i32 %3, ptr %0, align 16
   ret void
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define void @storei16(i16* nocapture %0, i16 %1) {
+define void @storei16(ptr nocapture %0, i16 %1) {
 ; CHECK-LABEL: storei16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st2b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i16 %1, i16* %0, align 16
+  store i16 %1, ptr %0, align 16
   ret void
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define void @storei16tr(i16* nocapture %0, i64 %1) {
+define void @storei16tr(ptr nocapture %0, i64 %1) {
 ; CHECK-LABEL: storei16tr:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st2b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = trunc i64 %1 to i16
-  store i16 %3, i16* %0, align 16
+  store i16 %3, ptr %0, align 16
   ret void
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define void @storei8(i8* nocapture %0, i8 %1) {
+define void @storei8(ptr nocapture %0, i8 %1) {
 ; CHECK-LABEL: storei8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st1b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i8 %1, i8* %0, align 16
+  store i8 %1, ptr %0, align 16
   ret void
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define void @storei8tr(i8* nocapture %0, i64 %1) {
+define void @storei8tr(ptr nocapture %0, i64 %1) {
 ; CHECK-LABEL: storei8tr:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st1b %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = trunc i64 %1 to i8
-  store i8 %3, i8* %0, align 16
+  store i8 %3, ptr %0, align 16
   ret void
 }
 
@@ -124,7 +124,7 @@ define void @storef128stk(fp128 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca fp128, align 16
-  store fp128 %0, fp128* %addr, align 16
+  store fp128 %0, ptr %addr, align 16
   ret void
 }
 
@@ -136,7 +136,7 @@ define void @storef64stk(double %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca double, align 16
-  store double %0, double* %addr, align 16
+  store double %0, ptr %addr, align 16
   ret void
 }
 
@@ -148,7 +148,7 @@ define void @storef32stk(float %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca float, align 16
-  store float %0, float* %addr, align 16
+  store float %0, ptr %addr, align 16
   ret void
 }
 
@@ -161,7 +161,7 @@ define void @storei128stk(i128 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i128, align 16
-  store i128 %0, i128* %addr, align 16
+  store i128 %0, ptr %addr, align 16
   ret void
 }
 
@@ -173,7 +173,7 @@ define void @storei64stk(i64 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i64, align 16
-  store i64 %0, i64* %addr, align 16
+  store i64 %0, ptr %addr, align 16
   ret void
 }
 
@@ -185,7 +185,7 @@ define void @storei32stk(i32 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i32, align 16
-  store i32 %0, i32* %addr, align 16
+  store i32 %0, ptr %addr, align 16
   ret void
 }
 
@@ -197,7 +197,7 @@ define void @storei16stk(i16 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i16, align 16
-  store i16 %0, i16* %addr, align 16
+  store i16 %0, ptr %addr, align 16
   ret void
 }
 
@@ -209,6 +209,6 @@ define void @storei8stk(i8 %0) {
 ; CHECK-NEXT:    adds.l %s11, 16, %s11
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca i8, align 16
-  store i8 %0, i8* %addr, align 16
+  store i8 %0, ptr %addr, align 16
   ret void
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/store_gv.ll b/llvm/test/CodeGen/VE/Scalar/store_gv.ll
index b8731e54d13aa..6f70b81a4915e 100644
--- a/llvm/test/CodeGen/VE/Scalar/store_gv.ll
+++ b/llvm/test/CodeGen/VE/Scalar/store_gv.ll
@@ -19,7 +19,7 @@ define void @storef128com(fp128 %0) {
 ; CHECK-NEXT:    st %s0, 8(, %s2)
 ; CHECK-NEXT:    st %s1, (, %s2)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store fp128 %0, fp128* @vf128, align 16
+  store fp128 %0, ptr @vf128, align 16
   ret void
 }
 
@@ -32,7 +32,7 @@ define void @storef64com(double %0) {
 ; CHECK-NEXT:    lea.sl %s1, vf64@hi(, %s1)
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store double %0, double* @vf64, align 8
+  store double %0, ptr @vf64, align 8
   ret void
 }
 
@@ -45,7 +45,7 @@ define void @storef32com(float %0) {
 ; CHECK-NEXT:    lea.sl %s1, vf32@hi(, %s1)
 ; CHECK-NEXT:    stu %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store float %0, float* @vf32, align 4
+  store float %0, ptr @vf32, align 4
   ret void
 }
 
@@ -59,7 +59,7 @@ define void @storei128com(i128 %0) {
 ; CHECK-NEXT:    st %s1, 8(, %s2)
 ; CHECK-NEXT:    st %s0, (, %s2)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i128 %0, i128* @vi128, align 16
+  store i128 %0, ptr @vi128, align 16
   ret void
 }
 
@@ -72,7 +72,7 @@ define void @storei64com(i64 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi64@hi(, %s1)
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i64 %0, i64* @vi64, align 8
+  store i64 %0, ptr @vi64, align 8
   ret void
 }
 
@@ -85,7 +85,7 @@ define void @storei32com(i32 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi32@hi(, %s1)
 ; CHECK-NEXT:    stl %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i32 %0, i32* @vi32, align 4
+  store i32 %0, ptr @vi32, align 4
   ret void
 }
 
@@ -98,7 +98,7 @@ define void @storei16com(i16 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi16@hi(, %s1)
 ; CHECK-NEXT:    st2b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i16 %0, i16* @vi16, align 2
+  store i16 %0, ptr @vi16, align 2
   ret void
 }
 
@@ -111,6 +111,6 @@ define void @storei8com(i8 %0) {
 ; CHECK-NEXT:    lea.sl %s1, vi8@hi(, %s1)
 ; CHECK-NEXT:    st1b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store i8 %0, i8* @vi8, align 1
+  store i8 %0, ptr @vi8, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/symbol_relocation_tls.ll b/llvm/test/CodeGen/VE/Scalar/symbol_relocation_tls.ll
index 968398e7ec925..fce36da7b9e85 100644
--- a/llvm/test/CodeGen/VE/Scalar/symbol_relocation_tls.ll
+++ b/llvm/test/CodeGen/VE/Scalar/symbol_relocation_tls.ll
@@ -12,7 +12,7 @@
 @y = internal thread_local global i32 0, align 4
 
 ; Function Attrs: norecurse nounwind readnone
-define nonnull i32* @get_global() {
+define nonnull ptr @get_global() {
 ; GENDYN:         lea %s0, (-24)
 ; GENDYN-NEXT:    R_VE_TLS_GD_LO32 x
 ; GENDYN-NEXT:    and %s0, %s0, (32)0
@@ -47,11 +47,11 @@ define nonnull i32* @get_global() {
 ; GENDYNPIC-NEXT:    bsic %s10, (, %s12)
 ; GENDYNPIC-NEXT:    or %s11, 0, %s9
 entry:
-  ret i32* @x
+  ret ptr @x
 }
 
 ; Function Attrs: norecurse nounwind readnone
-define nonnull i32* @get_local() {
+define nonnull ptr @get_local() {
 ; GENDYN:         lea %s0, (-24)
 ; GENDYN-NEXT:    R_VE_TLS_GD_LO32 y
 ; GENDYN-NEXT:    and %s0, %s0, (32)0
@@ -86,7 +86,7 @@ define nonnull i32* @get_local() {
 ; GENDYNPIC-NEXT:    bsic %s10, (, %s12)
 ; GENDYNPIC-NEXT:    or %s11, 0, %s9
 entry:
-  ret i32* @y
+  ret ptr @y
 }
 
 ; Function Attrs: norecurse nounwind
@@ -129,7 +129,7 @@ define void @set_global(i32 %v) {
 ; GENDYNPIC-NEXT:    ld %s18, 288(, %s11)
 ; GENDYNPIC-NEXT:    or %s11, 0, %s9
 entry:
-  store i32 %v, i32* @x, align 4
+  store i32 %v, ptr @x, align 4
   ret void
 }
 
@@ -173,6 +173,6 @@ define void @set_local(i32 %v) {
 ; GENDYNPIC-NEXT:    ld %s18, 288(, %s11)
 ; GENDYNPIC-NEXT:    or %s11, 0, %s9
 entry:
-  store i32 %v, i32* @y, align 4
+  store i32 %v, ptr @y, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/tls.ll b/llvm/test/CodeGen/VE/Scalar/tls.ll
index c8c8eefbfd717..c6247abf1fd60 100644
--- a/llvm/test/CodeGen/VE/Scalar/tls.ll
+++ b/llvm/test/CodeGen/VE/Scalar/tls.ll
@@ -9,7 +9,7 @@
 @y = internal thread_local global i32 0, align 4
 
 ; Function Attrs: norecurse nounwind readnone
-define nonnull i32* @get_global() {
+define nonnull ptr @get_global() {
 ; GENDYN-LABEL: get_global:
 ; GENDYN:       .LBB{{[0-9]+}}_2:
 ; GENDYN-NEXT:    lea %s0, x@tls_gd_lo(-24)
@@ -46,11 +46,11 @@ define nonnull i32* @get_global() {
 ; LOCAL-NEXT:  adds.l %s0, %s14, %s34
 ; LOCAL-NEXT:  or %s11, 0, %s9
 entry:
-  ret i32* @x
+  ret ptr @x
 }
 
 ; Function Attrs: norecurse nounwind readnone
-define nonnull i32* @get_local() {
+define nonnull ptr @get_local() {
 ; GENDYN-LABEL: get_local:
 ; GENDYN:       .LBB{{[0-9]+}}_2:
 ; GENDYN-NEXT:    lea %s0, y@tls_gd_lo(-24)
@@ -87,7 +87,7 @@ define nonnull i32* @get_local() {
 ; LOCAL-NEXT:  adds.l %s0, %s14, %s34
 ; LOCAL-NEXT:  or %s11, 0, %s9
 entry:
-  ret i32* @y
+  ret ptr @y
 }
 
 ; Function Attrs: norecurse nounwind
@@ -137,7 +137,7 @@ define void @set_global(i32 %v) {
 ; LOCAL-NEXT:  stl %s0, (, %s34)
 ; LOCAL-NEXT:  or %s11, 0, %s9
 entry:
-  store i32 %v, i32* @x, align 4
+  store i32 %v, ptr @x, align 4
   ret void
 }
 
@@ -188,6 +188,6 @@ define void @set_local(i32 %v) {
 ; LOCAL-NEXT:  stl %s0, (, %s34)
 ; LOCAL-NEXT:  or %s11, 0, %s9
 entry:
-  store i32 %v, i32* @y, align 4
+  store i32 %v, ptr @y, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/VE/Scalar/truncstore.ll b/llvm/test/CodeGen/VE/Scalar/truncstore.ll
index 8cdf355d9316f..5f102008f426c 100644
--- a/llvm/test/CodeGen/VE/Scalar/truncstore.ll
+++ b/llvm/test/CodeGen/VE/Scalar/truncstore.ll
@@ -1,72 +1,72 @@
 ; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
 
-define void @func0(i1 signext %p, i8* %a) {
+define void @func0(i1 signext %p, ptr %a) {
 ; CHECK-LABEL: func0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st1b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %p.conv = sext i1 %p to i8
-  store i8 %p.conv, i8* %a, align 2
+  store i8 %p.conv, ptr %a, align 2
   ret void
 }
 
-define void @func1(i8 signext %p, i16* %a) {
+define void @func1(i8 signext %p, ptr %a) {
 ; CHECK-LABEL: func1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st2b %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %p.conv = sext i8 %p to i16
-  store i16 %p.conv, i16* %a, align 2
+  store i16 %p.conv, ptr %a, align 2
   ret void
 }
 
-define void @func2(i8 signext %p, i32* %a) {
+define void @func2(i8 signext %p, ptr %a) {
 ; CHECK-LABEL: func2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stl %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %p.conv = sext i8 %p to i32
-  store i32 %p.conv, i32* %a, align 4
+  store i32 %p.conv, ptr %a, align 4
   ret void
 }
 
-define void @func3(i8 signext %p, i64* %a) {
+define void @func3(i8 signext %p, ptr %a) {
 ; CHECK-LABEL: func3:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %p.conv = sext i8 %p to i64
-  store i64 %p.conv, i64* %a, align 8
+  store i64 %p.conv, ptr %a, align 8
   ret void
 }
 
-define void @func5(i16 signext %p, i32* %a) {
+define void @func5(i16 signext %p, ptr %a) {
 ; CHECK-LABEL: func5:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    stl %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %p.conv = sext i16 %p to i32
-  store i32 %p.conv, i32* %a, align 4
+  store i32 %p.conv, ptr %a, align 4
   ret void
 }
 
-define void @func6(i16 signext %p, i64* %a) {
+define void @func6(i16 signext %p, ptr %a) {
 ; CHECK-LABEL: func6:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %p.conv = sext i16 %p to i64
-  store i64 %p.conv, i64* %a, align 8
+  store i64 %p.conv, ptr %a, align 8
   ret void
 }
 
-define void @func8(i32 %p, i64* %a) {
+define void @func8(i32 %p, ptr %a) {
 ; CHECK-LABEL: func8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %p.conv = sext i32 %p to i64
-  store i64 %p.conv, i64* %a, align 8
+  store i64 %p.conv, ptr %a, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/VE/Scalar/va_arg.ll b/llvm/test/CodeGen/VE/Scalar/va_arg.ll
index f6105f9605b51..f6ae60f6b96ba 100644
--- a/llvm/test/CodeGen/VE/Scalar/va_arg.ll
+++ b/llvm/test/CodeGen/VE/Scalar/va_arg.ll
@@ -43,44 +43,43 @@ define i32 @func_vainout(i32, ...) {
 ; CHECK:         bsic
 ; CHECK:         bsic
 
-  %a = alloca i8*, align 8
-  %a8 = bitcast i8** %a to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %a8)
-  call void @llvm.va_start(i8* nonnull %a8)
-  %p0 = va_arg i8** %a, i32
-  %p1 = va_arg i8** %a, i16
-  %p2 = va_arg i8** %a, i8
-  %p3 = va_arg i8** %a, i32
-  %p4 = va_arg i8** %a, i16
-  %p5 = va_arg i8** %a, i8
-  %p6 = va_arg i8** %a, float
-  %p7 = va_arg i8** %a, i8*
-  %p8 = va_arg i8** %a, i64
-  %p9 = va_arg i8** %a, double
-  %p10 = va_arg i8** %a, fp128
-  %p11 = va_arg i8** %a, double
-  call void @llvm.va_end(i8* nonnull %a8)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %a8)
-  %pf0 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0), i32 %p0)
+  %a = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %a)
+  call void @llvm.va_start(ptr nonnull %a)
+  %p0 = va_arg ptr %a, i32
+  %p1 = va_arg ptr %a, i16
+  %p2 = va_arg ptr %a, i8
+  %p3 = va_arg ptr %a, i32
+  %p4 = va_arg ptr %a, i16
+  %p5 = va_arg ptr %a, i8
+  %p6 = va_arg ptr %a, float
+  %p7 = va_arg ptr %a, ptr
+  %p8 = va_arg ptr %a, i64
+  %p9 = va_arg ptr %a, double
+  %p10 = va_arg ptr %a, fp128
+  %p11 = va_arg ptr %a, double
+  call void @llvm.va_end(ptr nonnull %a)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %a)
+  %pf0 = call i32 (ptr, ...) @printf(ptr @.str, i32 %p0)
   %p1.s32 = sext i16 %p1 to i32
-  %pf1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.1, i64 0, i64 0), i32 %p1.s32)
+  %pf1 = call i32 (ptr, ...) @printf(ptr @.str.1, i32 %p1.s32)
   %p2.s32 = sext i8 %p2 to i32
-  %pf2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.2, i64 0, i64 0), i32 %p2.s32)
-  %pf3 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.3, i64 0, i64 0), i32 %p3)
+  %pf2 = call i32 (ptr, ...) @printf(ptr @.str.2, i32 %p2.s32)
+  %pf3 = call i32 (ptr, ...) @printf(ptr @.str.3, i32 %p3)
   %p4.z32 = zext i16 %p4 to i32
-  %pf4 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.4, i64 0, i64 0), i32 %p4.z32)
+  %pf4 = call i32 (ptr, ...) @printf(ptr @.str.4, i32 %p4.z32)
   %p5.z32 = zext i8 %p5 to i32
-  %pf5 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.5, i64 0, i64 0), i32 %p5.z32)
-  %pf6 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.6, i64 0, i64 0), float %p6)
-  %pf7 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.7, i64 0, i64 0), i8* %p7)
-  %pf8 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.8, i64 0, i64 0), i64 %p8)
-  %pf9 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.9, i64 0, i64 0), double %p9)
-  %pf10 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.10, i64 0, i64 0), fp128 %p10)
-  %pf11 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.9, i64 0, i64 0), double %p11)
+  %pf5 = call i32 (ptr, ...) @printf(ptr @.str.5, i32 %p5.z32)
+  %pf6 = call i32 (ptr, ...) @printf(ptr @.str.6, float %p6)
+  %pf7 = call i32 (ptr, ...) @printf(ptr @.str.7, ptr %p7)
+  %pf8 = call i32 (ptr, ...) @printf(ptr @.str.8, i64 %p8)
+  %pf9 = call i32 (ptr, ...) @printf(ptr @.str.9, double %p9)
+  %pf10 = call i32 (ptr, ...) @printf(ptr @.str.10, fp128 %p10)
+  %pf11 = call i32 (ptr, ...) @printf(ptr @.str.9, double %p11)
   ret i32 0
 }
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_end(i8*)
-declare i32 @printf(i8* nocapture readonly, ...)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_end(ptr)
+declare i32 @printf(ptr nocapture readonly, ...)

diff --git a/llvm/test/CodeGen/VE/Scalar/va_callee.ll b/llvm/test/CodeGen/VE/Scalar/va_callee.ll
index ecdf05dc0f3a0..8ed707dac8a71 100644
--- a/llvm/test/CodeGen/VE/Scalar/va_callee.ll
+++ b/llvm/test/CodeGen/VE/Scalar/va_callee.ll
@@ -12,21 +12,20 @@ define i32 @va_func(i32, ...) {
 ; CHECK:       ld %s24, 240(, %s9)
 ; CHECK:       ld %s25, 248(, %s9)
 
-  %va = alloca i8*, align 8
-  %va.i8 = bitcast i8** %va to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %va.i8)
-  call void @llvm.va_start(i8* nonnull %va.i8)
-  %p1 = va_arg i8** %va, i32
-  %p2 = va_arg i8** %va, i16
-  %p3 = va_arg i8** %va, i8
-  %p4 = va_arg i8** %va, i32
-  %p5 = va_arg i8** %va, i16
-  %p6 = va_arg i8** %va, i8
-  %p7 = va_arg i8** %va, float
-  %p8 = va_arg i8** %va, i8*
-  %p9 = va_arg i8** %va, i64
-  %p10 = va_arg i8** %va, double
-  call void @llvm.va_end(i8* nonnull %va.i8)
+  %va = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %va)
+  call void @llvm.va_start(ptr nonnull %va)
+  %p1 = va_arg ptr %va, i32
+  %p2 = va_arg ptr %va, i16
+  %p3 = va_arg ptr %va, i8
+  %p4 = va_arg ptr %va, i32
+  %p5 = va_arg ptr %va, i16
+  %p6 = va_arg ptr %va, i8
+  %p7 = va_arg ptr %va, float
+  %p8 = va_arg ptr %va, ptr
+  %p9 = va_arg ptr %va, i64
+  %p10 = va_arg ptr %va, double
+  call void @llvm.va_end(ptr nonnull %va)
   call void @use_i32(i32 %p1)
   call void @use_s16(i16 %p2)
   call void @use_s8(i8 %p3)
@@ -34,10 +33,10 @@ define i32 @va_func(i32, ...) {
   call void @use_u16(i16 %p5)
   call void @use_u8(i8 %p6)
   call void @use_float(float %p7)
-  call void @use_i8p(i8* %p8)
+  call void @use_i8p(ptr %p8)
   call void @use_i64(i64 %p9)
   call void @use_double(double %p10)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %va.i8)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %va)
   ret i32 0
 }
 
@@ -53,28 +52,26 @@ define i32 @va_copy0(i32, ...) {
 ; CHECK:       ld %s24,
 ; CHECK:       ld %s25,
 
-  %va = alloca i8*, align 8
-  %va.i8 = bitcast i8** %va to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %va.i8)
-  call void @llvm.va_start(i8* nonnull %va.i8)
-  %vb = alloca i8*, align 8
-  %vb.i8 = bitcast i8** %vb to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %va.i8)
-  call void @llvm.va_copy(i8* nonnull %vb.i8, i8* nonnull %va.i8)
-  call void @llvm.va_end(i8* nonnull %va.i8)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %va.i8)
-  %p1 = va_arg i8** %vb, i32
-  %p2 = va_arg i8** %vb, i16
-  %p3 = va_arg i8** %vb, i8
-  %p4 = va_arg i8** %vb, i32
-  %p5 = va_arg i8** %vb, i16
-  %p6 = va_arg i8** %vb, i8
-  %p7 = va_arg i8** %vb, float
-  %p8 = va_arg i8** %vb, i8*
-  %p9 = va_arg i8** %vb, i64
-  %p10 = va_arg i8** %vb, double
-  call void @llvm.va_end(i8* nonnull %vb.i8)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %vb.i8)
+  %va = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %va)
+  call void @llvm.va_start(ptr nonnull %va)
+  %vb = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %va)
+  call void @llvm.va_copy(ptr nonnull %vb, ptr nonnull %va)
+  call void @llvm.va_end(ptr nonnull %va)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %va)
+  %p1 = va_arg ptr %vb, i32
+  %p2 = va_arg ptr %vb, i16
+  %p3 = va_arg ptr %vb, i8
+  %p4 = va_arg ptr %vb, i32
+  %p5 = va_arg ptr %vb, i16
+  %p6 = va_arg ptr %vb, i8
+  %p7 = va_arg ptr %vb, float
+  %p8 = va_arg ptr %vb, ptr
+  %p9 = va_arg ptr %vb, i64
+  %p10 = va_arg ptr %vb, double
+  call void @llvm.va_end(ptr nonnull %vb)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %vb)
   call void @use_i32(i32 %p1)
   call void @use_s16(i16 %p2)
   call void @use_s8(i8 %p3)
@@ -82,7 +79,7 @@ define i32 @va_copy0(i32, ...) {
   call void @use_u16(i16 %p5)
   call void @use_u8(i8 %p6)
   call void @use_float(float %p7)
-  call void @use_i8p(i8* %p8)
+  call void @use_i8p(ptr %p8)
   call void @use_i64(i64 %p9)
   call void @use_double(double %p10)
   ret i32 0
@@ -100,27 +97,25 @@ define i32 @va_copy8(i32, ...) {
 ; CHECK:       ld %s24,
 ; CHECK:       ld %s25,
 
-  %va = alloca i8*, align 8
-  %va.i8 = bitcast i8** %va to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %va.i8)
-  call void @llvm.va_start(i8* nonnull %va.i8)
-  %p1 = va_arg i8** %va, i32
-  %p2 = va_arg i8** %va, i16
-  %p3 = va_arg i8** %va, i8
-  %p4 = va_arg i8** %va, i32
-  %p5 = va_arg i8** %va, i16
-  %p6 = va_arg i8** %va, i8
-  %p7 = va_arg i8** %va, float
+  %va = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %va)
+  call void @llvm.va_start(ptr nonnull %va)
+  %p1 = va_arg ptr %va, i32
+  %p2 = va_arg ptr %va, i16
+  %p3 = va_arg ptr %va, i8
+  %p4 = va_arg ptr %va, i32
+  %p5 = va_arg ptr %va, i16
+  %p6 = va_arg ptr %va, i8
+  %p7 = va_arg ptr %va, float
 
-  %vc = alloca i8*, align 8
-  %vc.i8 = bitcast i8** %vc to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %va.i8)
-  call void @llvm.va_copy(i8* nonnull %vc.i8, i8* nonnull %va.i8)
-  call void @llvm.va_end(i8* nonnull %va.i8)
-  %p8 = va_arg i8** %vc, i8*
-  %p9 = va_arg i8** %vc, i64
-  %p10 = va_arg i8** %vc, double
-  call void @llvm.va_end(i8* nonnull %vc.i8)
+  %vc = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %va)
+  call void @llvm.va_copy(ptr nonnull %vc, ptr nonnull %va)
+  call void @llvm.va_end(ptr nonnull %va)
+  %p8 = va_arg ptr %vc, ptr
+  %p9 = va_arg ptr %vc, i64
+  %p10 = va_arg ptr %vc, double
+  call void @llvm.va_end(ptr nonnull %vc)
   call void @use_i32(i32 %p1)
   call void @use_s16(i16 %p2)
   call void @use_s8(i8 %p3)
@@ -128,10 +123,10 @@ define i32 @va_copy8(i32, ...) {
   call void @use_u16(i16 %p5)
   call void @use_u8(i8 %p6)
   call void @use_float(float %p7)
-  call void @use_i8p(i8* %p8)
+  call void @use_i8p(ptr %p8)
   call void @use_i64(i64 %p9)
   call void @use_double(double %p10)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %va.i8)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %va)
   ret i32 0
 }
 
@@ -141,12 +136,12 @@ declare void @use_u16(i16 zeroext)
 declare void @use_u8(i8 zeroext)
 declare void @use_s16(i16 signext)
 declare void @use_s8(i8 signext)
-declare void @use_i8p(i8*)
+declare void @use_i8p(ptr)
 declare void @use_float(float)
 declare void @use_double(double)
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_copy(i8*, i8*)
-declare void @llvm.va_end(i8*)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_copy(ptr, ptr)
+declare void @llvm.va_end(ptr)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)

diff --git a/llvm/test/CodeGen/VE/Scalar/va_caller.ll b/llvm/test/CodeGen/VE/Scalar/va_caller.ll
index 931b6bdab4935..3cffc5e6c9fcd 100644
--- a/llvm/test/CodeGen/VE/Scalar/va_caller.ll
+++ b/llvm/test/CodeGen/VE/Scalar/va_caller.ll
@@ -48,6 +48,6 @@ define i32 @caller() {
 ; CHECK-NEXT:    bsic %s10, (, %s12)
 ; CHECK-NEXT:    or %s0, 0, (0)1
 ; CHECK-NEXT:    or %s11, 0, %s9
-  call i32 (i32, ...) @func(i32 0, i16 1, i8 2, i32 3, i16 4, i8 5, float 6.0, i8* null, i64 8, double 9.0, i128 10, fp128 0xLA000000000000000)
+  call i32 (i32, ...) @func(i32 0, i16 1, i8 2, i32 3, i16 4, i8 5, float 6.0, ptr null, i64 8, double 9.0, i128 10, fp128 0xLA000000000000000)
   ret i32 0
 }

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/lsv.ll b/llvm/test/CodeGen/VE/VELIntrinsics/lsv.ll
index 17a9775d4474c..1320c0e4e2ec0 100644
--- a/llvm/test/CodeGen/VE/VELIntrinsics/lsv.ll
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/lsv.ll
@@ -6,7 +6,7 @@
 ;;;   We test LSVrr_v and LVSvr instructions.
 
 ; Function Attrs: nounwind
-define void @lsv_vvss(i8* %0, i64 %1, i32 signext %2) {
+define void @lsv_vvss(ptr %0, i64 %1, i32 signext %2) {
 ; CHECK-LABEL: lsv_vvss:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -16,23 +16,23 @@ define void @lsv_vvss(i8* %0, i64 %1, i32 signext %2) {
 ; CHECK-NEXT:    lsv %v0(%s2), %s1
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double> %4, i32 %2, i64 %1)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, i8* %0, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vld.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind readnone
 declare <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double>, i32, i64)
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind readonly
-define i64 @lvsl_vssl_imm(i8* readonly %0, i32 signext %1) {
+define i64 @lvsl_vssl_imm(ptr readonly %0, i32 signext %1) {
 ; CHECK-LABEL: lvsl_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -41,7 +41,7 @@ define i64 @lvsl_vssl_imm(i8* readonly %0, i32 signext %1) {
 ; CHECK-NEXT:    and %s0, %s1, (32)0
 ; CHECK-NEXT:    lvs %s0, %v0(%s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
   %4 = tail call i64 @llvm.ve.vl.lvsl.svs(<256 x double> %3, i32 %1)
   ret i64 %4
 }
@@ -50,7 +50,7 @@ define i64 @lvsl_vssl_imm(i8* readonly %0, i32 signext %1) {
 declare i64 @llvm.ve.vl.lvsl.svs(<256 x double>, i32)
 
 ; Function Attrs: nounwind readonly
-define double @lvsd_vssl_imm(i8* readonly %0, i32 signext %1) {
+define double @lvsd_vssl_imm(ptr readonly %0, i32 signext %1) {
 ; CHECK-LABEL: lvsd_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -59,7 +59,7 @@ define double @lvsd_vssl_imm(i8* readonly %0, i32 signext %1) {
 ; CHECK-NEXT:    and %s0, %s1, (32)0
 ; CHECK-NEXT:    lvs %s0, %v0(%s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
   %4 = tail call fast double @llvm.ve.vl.lvsd.svs(<256 x double> %3, i32 %1)
   ret double %4
 }
@@ -68,7 +68,7 @@ define double @lvsd_vssl_imm(i8* readonly %0, i32 signext %1) {
 declare double @llvm.ve.vl.lvsd.svs(<256 x double>, i32)
 
 ; Function Attrs: nounwind readonly
-define float @lvss_vssl_imm(i8* readonly %0, i32 signext %1) {
+define float @lvss_vssl_imm(ptr readonly %0, i32 signext %1) {
 ; CHECK-LABEL: lvss_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -77,7 +77,7 @@ define float @lvss_vssl_imm(i8* readonly %0, i32 signext %1) {
 ; CHECK-NEXT:    and %s0, %s1, (32)0
 ; CHECK-NEXT:    lvs %s0, %v0(%s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
   %4 = tail call fast float @llvm.ve.vl.lvss.svs(<256 x double> %3, i32 %1)
   ret float %4
 }

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/lvlgen.ll b/llvm/test/CodeGen/VE/VELIntrinsics/lvlgen.ll
index c4db624424511..8fd04210dfe8b 100644
--- a/llvm/test/CodeGen/VE/VELIntrinsics/lvlgen.ll
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/lvlgen.ll
@@ -3,14 +3,14 @@
 ; Test for correct placement of 'lvl' instructions
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
-declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vld.vssl(i64, ptr, i32)
+declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, ptr, i32)
 
 ; Check that the backend can handle constant VL as well as parametric VL
 ; sources.
 
 ; Function Attrs: nounwind
-define void @switching_vl(i32 %evl, i32 %evl2, i8* %P, i8* %Q) {
+define void @switching_vl(i32 %evl, i32 %evl2, ptr %P, ptr %Q) {
 ; CHECK-LABEL: switching_vl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s4, 256
@@ -30,12 +30,12 @@ define void @switching_vl(i32 %evl, i32 %evl2, i8* %P, i8* %Q) {
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vst %v0, 16, %s3
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %l0 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %P, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l0, i64 16, i8* %Q, i32 %evl)
-  %l1 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 16, i8* %P, i32 128)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l1, i64 16, i8* %Q, i32 %evl2)
-  %l2 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %P, i32 128)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l2, i64 16, i8* %Q, i32 %evl)
+  %l0 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %P, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l0, i64 16, ptr %Q, i32 %evl)
+  %l1 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 16, ptr %P, i32 128)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l1, i64 16, ptr %Q, i32 %evl2)
+  %l2 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %P, i32 128)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l2, i64 16, ptr %Q, i32 %evl)
   ret void
 }
 
@@ -43,7 +43,7 @@ define void @switching_vl(i32 %evl, i32 %evl2, i8* %P, i8* %Q) {
 ; in a basic block.
 
 ; Function Attrs: nounwind
-define void @stable_vl(i32 %evl, i8* %P, i8* %Q) {
+define void @stable_vl(i32 %evl, ptr %P, ptr %Q) {
 ; CHECK-LABEL: stable_vl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s0, %s0, (32)0
@@ -55,19 +55,19 @@ define void @stable_vl(i32 %evl, i8* %P, i8* %Q) {
 ; CHECK-NEXT:    vld %v0, 8, %s1
 ; CHECK-NEXT:    vst %v0, 16, %s2
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %l0 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %P, i32 %evl)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l0, i64 16, i8* %Q, i32 %evl)
-  %l1 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 16, i8* %P, i32 %evl)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l1, i64 16, i8* %Q, i32 %evl)
-  %l2 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %P, i32 %evl)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l2, i64 16, i8* %Q, i32 %evl)
+  %l0 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %P, i32 %evl)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l0, i64 16, ptr %Q, i32 %evl)
+  %l1 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 16, ptr %P, i32 %evl)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l1, i64 16, ptr %Q, i32 %evl)
+  %l2 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %P, i32 %evl)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l2, i64 16, ptr %Q, i32 %evl)
   ret void
 }
 
 ;;; Check the case we have a call in the middle of vector instructions.
 
 ; Function Attrs: nounwind
-define void @call_invl(i32 %evl, i8* %P, i8* %Q) {
+define void @call_invl(i32 %evl, ptr %P, ptr %Q) {
 ; CHECK-LABEL: call_invl:
 ; CHECK:       .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT:    st %s18, 288(, %s11) # 8-byte Folded Spill
@@ -92,13 +92,13 @@ define void @call_invl(i32 %evl, i8* %P, i8* %Q) {
 ; CHECK-NEXT:    ld %s19, 296(, %s11) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld %s18, 288(, %s11) # 8-byte Folded Reload
 ; CHECK-NEXT:    or %s11, 0, %s9
-  %l0 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %P, i32 %evl)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l0, i64 16, i8* %Q, i32 %evl)
+  %l0 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %P, i32 %evl)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l0, i64 16, ptr %Q, i32 %evl)
   call void @fun()
-  %l1 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 16, i8* %P, i32 %evl)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l1, i64 16, i8* %Q, i32 %evl)
-  %l2 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %P, i32 %evl)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l2, i64 16, i8* %Q, i32 %evl)
+  %l1 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 16, ptr %P, i32 %evl)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l1, i64 16, ptr %Q, i32 %evl)
+  %l2 = tail call <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %P, i32 %evl)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %l2, i64 16, ptr %Q, i32 %evl)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/lvm.ll b/llvm/test/CodeGen/VE/VELIntrinsics/lvm.ll
index 8280d4f244ca4..c5d5dc95a64ab 100644
--- a/llvm/test/CodeGen/VE/VELIntrinsics/lvm.ll
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/lvm.ll
@@ -6,7 +6,7 @@
 ;;;   We test LVMir_m, LVMyir_y, SVMmi, and SVMyi instructions.
 
 ; Function Attrs: nounwind readnone
-define i64 @lvm_mmss(i8* nocapture readnone %0, i64 %1) {
+define i64 @lvm_mmss(ptr nocapture readnone %0, i64 %1) {
 ; CHECK-LABEL: lvm_mmss:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lvm %vm1, 3, %s1
@@ -24,7 +24,7 @@ declare <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1>, i64, i64)
 declare i64 @llvm.ve.vl.svm.sms(<256 x i1>, i64)
 
 ; Function Attrs: nounwind readnone
-define i64 @lvml_MMss(i8* nocapture readnone %0, i64 %1) {
+define i64 @lvml_MMss(ptr nocapture readnone %0, i64 %1) {
 ; CHECK-LABEL: lvml_MMss:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lvm %vm2, 1, %s1

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/pack.ll b/llvm/test/CodeGen/VE/VELIntrinsics/pack.ll
index 7ad702ace430e..6ff597b7ac694 100644
--- a/llvm/test/CodeGen/VE/VELIntrinsics/pack.ll
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/pack.ll
@@ -6,24 +6,22 @@
 ;;;   We test pack_f32p and pack_f32a pseudo instruction.
 
 ; Function Attrs: nounwind readonly
-define fastcc i64 @pack_f32p(float* readonly %0, float* readonly %1) {
+define fastcc i64 @pack_f32p(ptr readonly %0, ptr readonly %1) {
 ; CHECK-LABEL: pack_f32p:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ldu %s0, (, %s0)
 ; CHECK-NEXT:    ldl.zx %s1, (, %s1)
 ; CHECK-NEXT:    or %s0, %s0, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = bitcast float* %0 to i8*
-  %4 = bitcast float* %1 to i8*
-  %5 = tail call i64 @llvm.ve.vl.pack.f32p(i8* %3, i8* %4)
-  ret i64 %5
+  %3 = tail call i64 @llvm.ve.vl.pack.f32p(ptr %0, ptr %1)
+  ret i64 %3
 }
 
 ; Function Attrs: nounwind readonly
-declare i64 @llvm.ve.vl.pack.f32p(i8*, i8*)
+declare i64 @llvm.ve.vl.pack.f32p(ptr, ptr)
 
 ; Function Attrs: nounwind readonly
-define fastcc i64 @pack_f32a(float* readonly %0) {
+define fastcc i64 @pack_f32a(ptr readonly %0) {
 ; CHECK-LABEL: pack_f32a:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ldl.zx %s0, (, %s0)
@@ -32,10 +30,9 @@ define fastcc i64 @pack_f32a(float* readonly %0) {
 ; CHECK-NEXT:    lea.sl %s1, 1(, %s1)
 ; CHECK-NEXT:    mulu.l %s0, %s0, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = bitcast float* %0 to i8*
-  %3 = tail call i64 @llvm.ve.vl.pack.f32a(i8* %2)
-  ret i64 %3
+  %2 = tail call i64 @llvm.ve.vl.pack.f32a(ptr %0)
+  ret i64 %2
 }
 
 ; Function Attrs: nounwind readonly
-declare i64 @llvm.ve.vl.pack.f32a(i8*)
+declare i64 @llvm.ve.vl.pack.f32a(ptr)

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/pfchv.ll b/llvm/test/CodeGen/VE/VELIntrinsics/pfchv.ll
index 3f4acd4d78c00..88ecffbf0c6a1 100644
--- a/llvm/test/CodeGen/VE/VELIntrinsics/pfchv.ll
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/pfchv.ll
@@ -6,55 +6,55 @@
 ;;;   We test PFCHVrrl, PFCHVirl, PFCHVNCrrl, and PFCHVNCirl instructions.
 
 ; Function Attrs: nounwind
-define void @pfchv_vssl(i8* %0, i64 %1) {
+define void @pfchv_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: pfchv_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
 ; CHECK-NEXT:    lvl %s2
 ; CHECK-NEXT:    pfchv %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  tail call void @llvm.ve.vl.pfchv.ssl(i64 %1, i8* %0, i32 256)
+  tail call void @llvm.ve.vl.pfchv.ssl(i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: inaccessiblemem_or_argmemonly nounwind
-declare void @llvm.ve.vl.pfchv.ssl(i64, i8*, i32)
+declare void @llvm.ve.vl.pfchv.ssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @pfchv_vssl_imm(i8* %0) {
+define void @pfchv_vssl_imm(ptr %0) {
 ; CHECK-LABEL: pfchv_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    pfchv 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  tail call void @llvm.ve.vl.pfchv.ssl(i64 8, i8* %0, i32 256)
+  tail call void @llvm.ve.vl.pfchv.ssl(i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @pfchvnc_vssl(i8* %0, i64 %1) {
+define void @pfchvnc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: pfchvnc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
 ; CHECK-NEXT:    lvl %s2
 ; CHECK-NEXT:    pfchv.nc %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  tail call void @llvm.ve.vl.pfchvnc.ssl(i64 %1, i8* %0, i32 256)
+  tail call void @llvm.ve.vl.pfchvnc.ssl(i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: inaccessiblemem_or_argmemonly nounwind
-declare void @llvm.ve.vl.pfchvnc.ssl(i64, i8*, i32)
+declare void @llvm.ve.vl.pfchvnc.ssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @pfchvnc_vssl_imm(i8* %0) {
+define void @pfchvnc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: pfchvnc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    pfchv.nc 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  tail call void @llvm.ve.vl.pfchvnc.ssl(i64 8, i8* %0, i32 256)
+  tail call void @llvm.ve.vl.pfchvnc.ssl(i64 8, ptr %0, i32 256)
   ret void
 }

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vbrd.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vbrd.ll
index faba6f8c5222c..8e43d22c0e949 100644
--- a/llvm/test/CodeGen/VE/VELIntrinsics/vbrd.ll
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vbrd.ll
@@ -6,7 +6,7 @@
 ;;;   We test VLD*rrl, VLD*irl, VLD*rrl_v, and VLD*irl_v instructions.
 
 ; Function Attrs: nounwind
-define void @vbrdd_vsl(double %0, i8* %1) {
+define void @vbrdd_vsl(double %0, ptr %1) {
 ; CHECK-LABEL: vbrdd_vsl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -17,7 +17,7 @@ define void @vbrdd_vsl(double %0, i8* %1) {
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdd.vsl(double %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, i8* %1)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, ptr %1)
   ret void
 }
 
@@ -25,7 +25,7 @@ define void @vbrdd_vsl(double %0, i8* %1) {
 declare <256 x double> @llvm.ve.vl.vbrdd.vsl(double, i32)
 
 ; Function Attrs: nounwind
-define void @vbrdd_vsvl(double %0, i8* %1) {
+define void @vbrdd_vsvl(double %0, ptr %1) {
 ; CHECK-LABEL: vbrdd_vsvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -34,23 +34,23 @@ define void @vbrdd_vsvl(double %0, i8* %1) {
 ; CHECK-NEXT:    vbrd %v0, %s0
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vbrdd.vsvl(double %0, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %1, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vld.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind readnone
 declare <256 x double> @llvm.ve.vl.vbrdd.vsvl(double, <256 x double>, i32)
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vbrdd_vsmvl(double %0, i8* %1) {
+define void @vbrdd_vsmvl(double %0, ptr %1) {
 ; CHECK-LABEL: vbrdd_vsmvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -67,11 +67,11 @@ define void @vbrdd_vsmvl(double %0, i8* %1) {
 ; CHECK-NEXT:    vbrd %v0, %s0, %vm1
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = fptoui double %0 to i64
   %5 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %4)
   %6 = tail call fast <256 x double> @llvm.ve.vl.vbrdd.vsmvl(double %0, <256 x i1> %5, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, ptr %1, i32 256)
   ret void
 }
 
@@ -82,7 +82,7 @@ declare <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1>, i64, i64)
 declare <256 x double> @llvm.ve.vl.vbrdd.vsmvl(double, <256 x i1>, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vbrdl_vsl(i64 %0, i8* %1) {
+define void @vbrdl_vsl(i64 %0, ptr %1) {
 ; CHECK-LABEL: vbrdl_vsl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -93,7 +93,7 @@ define void @vbrdl_vsl(i64 %0, i8* %1) {
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsl(i64 %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, i8* %1)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, ptr %1)
   ret void
 }
 
@@ -101,7 +101,7 @@ define void @vbrdl_vsl(i64 %0, i8* %1) {
 declare <256 x double> @llvm.ve.vl.vbrdl.vsl(i64, i32)
 
 ; Function Attrs: nounwind
-define void @vbrdl_vsvl(i64 %0, i8* %1) {
+define void @vbrdl_vsvl(i64 %0, ptr %1) {
 ; CHECK-LABEL: vbrdl_vsvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -110,9 +110,9 @@ define void @vbrdl_vsvl(i64 %0, i8* %1) {
 ; CHECK-NEXT:    vbrd %v0, %s0
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsvl(i64 %0, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %1, i32 256)
   ret void
 }
 
@@ -120,7 +120,7 @@ define void @vbrdl_vsvl(i64 %0, i8* %1) {
 declare <256 x double> @llvm.ve.vl.vbrdl.vsvl(i64, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vbrdl_vsmvl(i64 %0, i8* %1) {
+define void @vbrdl_vsmvl(i64 %0, ptr %1) {
 ; CHECK-LABEL: vbrdl_vsmvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -130,10 +130,10 @@ define void @vbrdl_vsmvl(i64 %0, i8* %1) {
 ; CHECK-NEXT:    vbrd %v0, %s0, %vm1
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %0)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsmvl(i64 %0, <256 x i1> %4, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, ptr %1, i32 256)
   ret void
 }
 
@@ -141,7 +141,7 @@ define void @vbrdl_vsmvl(i64 %0, i8* %1) {
 declare <256 x double> @llvm.ve.vl.vbrdl.vsmvl(i64, <256 x i1>, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vbrdl_imm_vsl(i64 %0, i8* %1) {
+define void @vbrdl_imm_vsl(i64 %0, ptr %1) {
 ; CHECK-LABEL: vbrdl_imm_vsl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
@@ -152,12 +152,12 @@ define void @vbrdl_imm_vsl(i64 %0, i8* %1) {
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsl(i64 31, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, i8* %1)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, ptr %1)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vbrdl_imm_vsvl(i64 %0, i8* %1) {
+define void @vbrdl_imm_vsvl(i64 %0, ptr %1) {
 ; CHECK-LABEL: vbrdl_imm_vsvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
@@ -166,14 +166,14 @@ define void @vbrdl_imm_vsvl(i64 %0, i8* %1) {
 ; CHECK-NEXT:    vbrd %v0, 31
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsvl(i64 31, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %1, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vbrdl_imm_vsmvl(i64 %0, i8* %1) {
+define void @vbrdl_imm_vsmvl(i64 %0, ptr %1) {
 ; CHECK-LABEL: vbrdl_imm_vsmvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -183,15 +183,15 @@ define void @vbrdl_imm_vsmvl(i64 %0, i8* %1) {
 ; CHECK-NEXT:    vbrd %v0, 31, %vm1
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %0)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsmvl(i64 31, <256 x i1> %4, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %5, i64 8, ptr %1, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vbrds_vsl(float %0, i8* %1) {
+define void @vbrds_vsl(float %0, ptr %1) {
 ; CHECK-LABEL: vbrds_vsl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -202,7 +202,7 @@ define void @vbrds_vsl(float %0, i8* %1) {
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vbrds.vsl(float %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, i8* %1)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, ptr %1)
   ret void
 }
 
@@ -210,7 +210,7 @@ define void @vbrds_vsl(float %0, i8* %1) {
 declare <256 x double> @llvm.ve.vl.vbrds.vsl(float, i32)
 
 ; Function Attrs: nounwind
-define void @vbrds_vsvl(float %0, i8* %1) {
+define void @vbrds_vsvl(float %0, ptr %1) {
 ; CHECK-LABEL: vbrds_vsvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -219,9 +219,9 @@ define void @vbrds_vsvl(float %0, i8* %1) {
 ; CHECK-NEXT:    vbrdu %v0, %s0
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vbrds.vsvl(float %0, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %1, i32 256)
   ret void
 }
 
@@ -229,7 +229,7 @@ define void @vbrds_vsvl(float %0, i8* %1) {
 declare <256 x double> @llvm.ve.vl.vbrds.vsvl(float, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vbrds_vsmvl(float %0, i8* %1) {
+define void @vbrds_vsmvl(float %0, ptr %1) {
 ; CHECK-LABEL: vbrds_vsmvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -248,11 +248,11 @@ define void @vbrds_vsmvl(float %0, i8* %1) {
 ; CHECK-NEXT:    vbrdu %v0, %s0, %vm1
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = fptoui float %0 to i64
   %5 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %4)
   %6 = tail call fast <256 x double> @llvm.ve.vl.vbrds.vsmvl(float %0, <256 x i1> %5, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, ptr %1, i32 256)
   ret void
 }
 
@@ -260,7 +260,7 @@ define void @vbrds_vsmvl(float %0, i8* %1) {
 declare <256 x double> @llvm.ve.vl.vbrds.vsmvl(float, <256 x i1>, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vbrdw_vsl(i32 signext %0, i8* %1) {
+define void @vbrdw_vsl(i32 signext %0, ptr %1) {
 ; CHECK-LABEL: vbrdw_vsl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s0, %s0, (32)0
@@ -272,7 +272,7 @@ define void @vbrdw_vsl(i32 signext %0, i8* %1) {
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsl(i32 %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, i8* %1)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, ptr %1)
   ret void
 }
 
@@ -280,7 +280,7 @@ define void @vbrdw_vsl(i32 signext %0, i8* %1) {
 declare <256 x double> @llvm.ve.vl.vbrdw.vsl(i32, i32)
 
 ; Function Attrs: nounwind
-define void @vbrdw_vsvl(i32 signext %0, i8* %1) {
+define void @vbrdw_vsvl(i32 signext %0, ptr %1) {
 ; CHECK-LABEL: vbrdw_vsvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -290,9 +290,9 @@ define void @vbrdw_vsvl(i32 signext %0, i8* %1) {
 ; CHECK-NEXT:    vbrdl %v0, %s0
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsvl(i32 %0, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %1, i32 256)
   ret void
 }
 
@@ -300,7 +300,7 @@ define void @vbrdw_vsvl(i32 signext %0, i8* %1) {
 declare <256 x double> @llvm.ve.vl.vbrdw.vsvl(i32, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vbrdw_vsmvl(i32 signext %0, i8* %1) {
+define void @vbrdw_vsmvl(i32 signext %0, ptr %1) {
 ; CHECK-LABEL: vbrdw_vsmvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -311,11 +311,11 @@ define void @vbrdw_vsmvl(i32 signext %0, i8* %1) {
 ; CHECK-NEXT:    vbrdl %v0, %s3, %vm1
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = sext i32 %0 to i64
   %5 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %4)
   %6 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsmvl(i32 %0, <256 x i1> %5, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, ptr %1, i32 256)
   ret void
 }
 
@@ -323,7 +323,7 @@ define void @vbrdw_vsmvl(i32 signext %0, i8* %1) {
 declare <256 x double> @llvm.ve.vl.vbrdw.vsmvl(i32, <256 x i1>, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vbrdw_imm_vsl(i32 signext %0, i8* %1) {
+define void @vbrdw_imm_vsl(i32 signext %0, ptr %1) {
 ; CHECK-LABEL: vbrdw_imm_vsl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
@@ -334,12 +334,12 @@ define void @vbrdw_imm_vsl(i32 signext %0, i8* %1) {
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsl(i32 31, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, i8* %1)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, ptr %1)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vbrdw_imm_vsvl(i32 signext %0, i8* %1) {
+define void @vbrdw_imm_vsvl(i32 signext %0, ptr %1) {
 ; CHECK-LABEL: vbrdw_imm_vsvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
@@ -348,14 +348,14 @@ define void @vbrdw_imm_vsvl(i32 signext %0, i8* %1) {
 ; CHECK-NEXT:    vbrdl %v0, 31
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsvl(i32 31, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %1, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vbrdw_imm_vsmvl(i32 signext %0, i8* %1) {
+define void @vbrdw_imm_vsmvl(i32 signext %0, ptr %1) {
 ; CHECK-LABEL: vbrdw_imm_vsmvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -365,16 +365,16 @@ define void @vbrdw_imm_vsmvl(i32 signext %0, i8* %1) {
 ; CHECK-NEXT:    vbrdl %v0, 31, %vm1
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = sext i32 %0 to i64
   %5 = tail call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> undef, i64 3, i64 %4)
   %6 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsmvl(i32 31, <256 x i1> %5, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, ptr %1, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @pvbrd_vsl(i64 %0, i8* %1) {
+define void @pvbrd_vsl(i64 %0, ptr %1) {
 ; CHECK-LABEL: pvbrd_vsl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -385,7 +385,7 @@ define void @pvbrd_vsl(i64 %0, i8* %1) {
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.pvbrd.vsl(i64 %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, i8* %1)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %3, ptr %1)
   ret void
 }
 
@@ -393,7 +393,7 @@ define void @pvbrd_vsl(i64 %0, i8* %1) {
 declare <256 x double> @llvm.ve.vl.pvbrd.vsl(i64, i32)
 
 ; Function Attrs: nounwind
-define void @pvbrd_vsvl(i64 %0, i8* %1) {
+define void @pvbrd_vsvl(i64 %0, ptr %1) {
 ; CHECK-LABEL: pvbrd_vsvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -402,9 +402,9 @@ define void @pvbrd_vsvl(i64 %0, i8* %1) {
 ; CHECK-NEXT:    pvbrd %v0, %s0
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.pvbrd.vsvl(i64 %0, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %1, i32 256)
   ret void
 }
 
@@ -412,7 +412,7 @@ define void @pvbrd_vsvl(i64 %0, i8* %1) {
 declare <256 x double> @llvm.ve.vl.pvbrd.vsvl(i64, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @pvbrd_vsMvl(i64 %0, i8* %1) {
+define void @pvbrd_vsMvl(i64 %0, ptr %1) {
 ; CHECK-LABEL: pvbrd_vsMvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -423,11 +423,11 @@ define void @pvbrd_vsMvl(i64 %0, i8* %1) {
 ; CHECK-NEXT:    pvbrd %v0, %s0, %vm2
 ; CHECK-NEXT:    vst %v0, 8, %s1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
   %4 = tail call <512 x i1> @llvm.ve.vl.lvm.MMss(<512 x i1> undef, i64 1, i64 %0)
   %5 = tail call <512 x i1> @llvm.ve.vl.lvm.MMss(<512 x i1> %4, i64 6, i64 %0)
   %6 = tail call fast <256 x double> @llvm.ve.vl.pvbrd.vsMvl(i64 %0, <512 x i1> %5, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, i8* %1, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %6, i64 8, ptr %1, i32 256)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vld.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vld.ll
index 6068b816f911a..a2167286b3ba3 100644
--- a/llvm/test/CodeGen/VE/VELIntrinsics/vld.ll
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vld.ll
@@ -6,7 +6,7 @@
 ;;;   We test VLD*rrl, VLD*irl, VLD*rrl_v, and VLD*irl_v instructions.
 
 ; Function Attrs: nounwind
-define void @vld_vssl(i8* %0, i64 %1) {
+define void @vld_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vld_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -16,16 +16,16 @@ define void @vld_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vld.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vld_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vld_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vld_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -36,17 +36,17 @@ define void @vld_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vld.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vld.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vld_vssl_imm(i8* %0) {
+define void @vld_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vld_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -56,13 +56,13 @@ define void @vld_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vld_vssvl_imm(i8* %0, i8* %1) {
+define void @vld_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vld_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -73,14 +73,14 @@ define void @vld_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldnc_vssl(i8* %0, i64 %1) {
+define void @vldnc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vldnc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -90,16 +90,16 @@ define void @vldnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldnc.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vldnc.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vldnc_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vldnc_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vldnc_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -110,17 +110,17 @@ define void @vldnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldnc.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vldnc.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vldnc_vssl_imm(i8* %0) {
+define void @vldnc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vldnc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -130,13 +130,13 @@ define void @vldnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldnc_vssvl_imm(i8* %0, i8* %1) {
+define void @vldnc_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vldnc_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -147,14 +147,14 @@ define void @vldnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldu_vssl(i8* %0, i64 %1) {
+define void @vldu_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vldu_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -164,16 +164,16 @@ define void @vldu_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldu.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vldu.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vldu_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vldu_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vldu_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -184,17 +184,17 @@ define void @vldu_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldu.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vldu.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vldu_vssl_imm(i8* %0) {
+define void @vldu_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vldu_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -204,13 +204,13 @@ define void @vldu_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldu_vssvl_imm(i8* %0, i8* %1) {
+define void @vldu_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vldu_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -221,14 +221,14 @@ define void @vldu_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldunc_vssl(i8* %0, i64 %1) {
+define void @vldunc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vldunc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -238,16 +238,16 @@ define void @vldunc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldunc.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vldunc.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vldunc_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vldunc_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vldunc_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -258,17 +258,17 @@ define void @vldunc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldunc.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vldunc.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vldunc_vssl_imm(i8* %0) {
+define void @vldunc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vldunc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -278,13 +278,13 @@ define void @vldunc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldunc_vssvl_imm(i8* %0, i8* %1) {
+define void @vldunc_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vldunc_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -295,14 +295,14 @@ define void @vldunc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldlsx_vssl(i8* %0, i64 %1) {
+define void @vldlsx_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vldlsx_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -312,16 +312,16 @@ define void @vldlsx_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldlsx.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vldlsx.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vldlsx_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vldlsx_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vldlsx_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -332,17 +332,17 @@ define void @vldlsx_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vldlsx_vssl_imm(i8* %0) {
+define void @vldlsx_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vldlsx_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -352,13 +352,13 @@ define void @vldlsx_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldlsx_vssvl_imm(i8* %0, i8* %1) {
+define void @vldlsx_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vldlsx_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -369,14 +369,14 @@ define void @vldlsx_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldlsxnc_vssl(i8* %0, i64 %1) {
+define void @vldlsxnc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vldlsxnc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -386,16 +386,16 @@ define void @vldlsxnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vldlsxnc_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vldlsxnc_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vldlsxnc_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -406,17 +406,17 @@ define void @vldlsxnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vldlsxnc_vssl_imm(i8* %0) {
+define void @vldlsxnc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vldlsxnc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -426,13 +426,13 @@ define void @vldlsxnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldlsxnc_vssvl_imm(i8* %0, i8* %1) {
+define void @vldlsxnc_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vldlsxnc_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -443,14 +443,14 @@ define void @vldlsxnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldlzx_vssl(i8* %0, i64 %1) {
+define void @vldlzx_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vldlzx_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -460,16 +460,16 @@ define void @vldlzx_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldlzx.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vldlzx.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vldlzx_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vldlzx_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vldlzx_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -480,17 +480,17 @@ define void @vldlzx_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vldlzx_vssl_imm(i8* %0) {
+define void @vldlzx_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vldlzx_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -500,13 +500,13 @@ define void @vldlzx_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldlzx_vssvl_imm(i8* %0, i8* %1) {
+define void @vldlzx_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vldlzx_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -517,14 +517,14 @@ define void @vldlzx_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldlzxnc_vssl(i8* %0, i64 %1) {
+define void @vldlzxnc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vldlzxnc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -534,16 +534,16 @@ define void @vldlzxnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vldlzxnc_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vldlzxnc_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vldlzxnc_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -554,17 +554,17 @@ define void @vldlzxnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vldlzxnc_vssl_imm(i8* %0) {
+define void @vldlzxnc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vldlzxnc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -574,13 +574,13 @@ define void @vldlzxnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldlzxnc_vssvl_imm(i8* %0, i8* %1) {
+define void @vldlzxnc_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vldlzxnc_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -591,14 +591,14 @@ define void @vldlzxnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vld2d_vssl(i8* %0, i64 %1) {
+define void @vld2d_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vld2d_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -608,16 +608,16 @@ define void @vld2d_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vld2d.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vld2d.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vld2d_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vld2d_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vld2d_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -628,17 +628,17 @@ define void @vld2d_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vld2d.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vld2d.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vld2d_vssl_imm(i8* %0) {
+define void @vld2d_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vld2d_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -648,13 +648,13 @@ define void @vld2d_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vld2d_vssvl_imm(i8* %0, i8* %1) {
+define void @vld2d_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vld2d_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -665,14 +665,14 @@ define void @vld2d_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vld2dnc_vssl(i8* %0, i64 %1) {
+define void @vld2dnc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vld2dnc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -682,16 +682,16 @@ define void @vld2dnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vld2dnc_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vld2dnc_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vld2dnc_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -702,17 +702,17 @@ define void @vld2dnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vld2dnc_vssl_imm(i8* %0) {
+define void @vld2dnc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vld2dnc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -722,13 +722,13 @@ define void @vld2dnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vld2dnc_vssvl_imm(i8* %0, i8* %1) {
+define void @vld2dnc_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vld2dnc_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -739,14 +739,14 @@ define void @vld2dnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldu2d_vssl(i8* %0, i64 %1) {
+define void @vldu2d_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vldu2d_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -756,16 +756,16 @@ define void @vldu2d_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldu2d.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vldu2d.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vldu2d_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vldu2d_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vldu2d_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -776,17 +776,17 @@ define void @vldu2d_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vldu2d_vssl_imm(i8* %0) {
+define void @vldu2d_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vldu2d_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -796,13 +796,13 @@ define void @vldu2d_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldu2d_vssvl_imm(i8* %0, i8* %1) {
+define void @vldu2d_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vldu2d_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -813,14 +813,14 @@ define void @vldu2d_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldu2dnc_vssl(i8* %0, i64 %1) {
+define void @vldu2dnc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vldu2dnc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -830,16 +830,16 @@ define void @vldu2dnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vldu2dnc_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vldu2dnc_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vldu2dnc_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -850,17 +850,17 @@ define void @vldu2dnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vldu2dnc_vssl_imm(i8* %0) {
+define void @vldu2dnc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vldu2dnc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -870,13 +870,13 @@ define void @vldu2dnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldu2dnc_vssvl_imm(i8* %0, i8* %1) {
+define void @vldu2dnc_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vldu2dnc_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -887,14 +887,14 @@ define void @vldu2dnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldl2dsx_vssl(i8* %0, i64 %1) {
+define void @vldl2dsx_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vldl2dsx_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -904,16 +904,16 @@ define void @vldl2dsx_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vldl2dsx_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vldl2dsx_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vldl2dsx_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -924,17 +924,17 @@ define void @vldl2dsx_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vldl2dsx_vssl_imm(i8* %0) {
+define void @vldl2dsx_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vldl2dsx_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -944,13 +944,13 @@ define void @vldl2dsx_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldl2dsx_vssvl_imm(i8* %0, i8* %1) {
+define void @vldl2dsx_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vldl2dsx_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -961,14 +961,14 @@ define void @vldl2dsx_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldl2dsxnc_vssl(i8* %0, i64 %1) {
+define void @vldl2dsxnc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vldl2dsxnc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -978,16 +978,16 @@ define void @vldl2dsxnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vldl2dsxnc_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vldl2dsxnc_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vldl2dsxnc_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -998,17 +998,17 @@ define void @vldl2dsxnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vldl2dsxnc_vssl_imm(i8* %0) {
+define void @vldl2dsxnc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vldl2dsxnc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1018,13 +1018,13 @@ define void @vldl2dsxnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldl2dsxnc_vssvl_imm(i8* %0, i8* %1) {
+define void @vldl2dsxnc_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vldl2dsxnc_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1035,14 +1035,14 @@ define void @vldl2dsxnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldl2dzx_vssl(i8* %0, i64 %1) {
+define void @vldl2dzx_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vldl2dzx_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1052,16 +1052,16 @@ define void @vldl2dzx_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vldl2dzx_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vldl2dzx_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vldl2dzx_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -1072,17 +1072,17 @@ define void @vldl2dzx_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vldl2dzx_vssl_imm(i8* %0) {
+define void @vldl2dzx_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vldl2dzx_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1092,13 +1092,13 @@ define void @vldl2dzx_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldl2dzx_vssvl_imm(i8* %0, i8* %1) {
+define void @vldl2dzx_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vldl2dzx_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1109,14 +1109,14 @@ define void @vldl2dzx_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldl2dzxnc_vssl(i8* %0, i64 %1) {
+define void @vldl2dzxnc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vldl2dzxnc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1126,16 +1126,16 @@ define void @vldl2dzxnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %1, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vldl2dzxnc_vssvl(i8* %0, i64 %1, i8* %2) {
+define void @vldl2dzxnc_vssvl(ptr %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: vldl2dzxnc_vssvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s3, 256
@@ -1146,17 +1146,17 @@ define void @vldl2dzxnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %1, i8* %2, i32 256)
-  %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %1, ptr %2, i32 256)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 %1, ptr %0, <256 x double> %4, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, ptr %0, i64 %1)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64, i8*, <256 x double>, i32)
+declare <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64, ptr, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vldl2dzxnc_vssl_imm(i8* %0) {
+define void @vldl2dzxnc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vldl2dzxnc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1166,13 +1166,13 @@ define void @vldl2dzxnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 8, ptr %0, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, ptr %0)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vldl2dzxnc_vssvl_imm(i8* %0, i8* %1) {
+define void @vldl2dzxnc_vssvl_imm(ptr %0, ptr %1) {
 ; CHECK-LABEL: vldl2dzxnc_vssvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1183,8 +1183,8 @@ define void @vldl2dzxnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 8, i8* %1, i32 256)
-  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 8, ptr %1, i32 256)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 8, ptr %0, <256 x double> %3, i32 256)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, ptr %0)
   ret void
 }

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vmv.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vmv.ll
index c54d3dfe39d2c..4d34b26441393 100644
--- a/llvm/test/CodeGen/VE/VELIntrinsics/vmv.ll
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vmv.ll
@@ -6,7 +6,7 @@
 ;;;   We test VMVivl and VMVivl_v, and VMVivml_v instructions.
 
 ; Function Attrs: nounwind
-define void @vmv_vsvl(i8* %0, i32 signext %1) {
+define void @vmv_vsvl(ptr %0, i32 signext %1) {
 ; CHECK-LABEL: vmv_vsvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -16,23 +16,23 @@ define void @vmv_vsvl(i8* %0, i32 signext %1) {
 ; CHECK-NEXT:    vmv %v0, %s1, %v0
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvl(i32 %1, <256 x double> %3, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %0, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vld.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind readnone
 declare <256 x double> @llvm.ve.vl.vmv.vsvl(i32, <256 x double>, i32)
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vmv_vsvl_imm(i8* %0) {
+define void @vmv_vsvl_imm(ptr %0) {
 ; CHECK-LABEL: vmv_vsvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -41,14 +41,14 @@ define void @vmv_vsvl_imm(i8* %0) {
 ; CHECK-NEXT:    vmv %v0, 31, %v0
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvl(i32 31, <256 x double> %2, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, i8* %0, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vmv_vsvvl(i8* %0, i32 signext %1) {
+define void @vmv_vsvvl(ptr %0, i32 signext %1) {
 ; CHECK-LABEL: vmv_vsvvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -61,9 +61,9 @@ define void @vmv_vsvvl(i8* %0, i32 signext %1) {
 ; CHECK-NEXT:    lvl %s2
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvvl(i32 %1, <256 x double> %3, <256 x double> %3, i32 128)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %0, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %0, i32 256)
   ret void
 }
 
@@ -71,7 +71,7 @@ define void @vmv_vsvvl(i8* %0, i32 signext %1) {
 declare <256 x double> @llvm.ve.vl.vmv.vsvvl(i32, <256 x double>, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vmv_vsvvl_imm(i8* %0) {
+define void @vmv_vsvvl_imm(ptr %0) {
 ; CHECK-LABEL: vmv_vsvvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -83,14 +83,14 @@ define void @vmv_vsvvl_imm(i8* %0) {
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvvl(i32 31, <256 x double> %2, <256 x double> %2, i32 128)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, i8* %0, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vmv_vsvmvl(i8* %0, i32 signext %1) {
+define void @vmv_vsvmvl(ptr %0, i32 signext %1) {
 ; CHECK-LABEL: vmv_vsvmvl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -103,9 +103,9 @@ define void @vmv_vsvmvl(i8* %0, i32 signext %1) {
 ; CHECK-NEXT:    lvl %s2
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32 %1, <256 x double> %3, <256 x i1> undef, <256 x double> %3, i32 128)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, i8* %0, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %0, i32 256)
   ret void
 }
 
@@ -113,7 +113,7 @@ define void @vmv_vsvmvl(i8* %0, i32 signext %1) {
 declare <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)
 
 ; Function Attrs: nounwind
-define void @vmv_vsvmvl_imm(i8* %0) {
+define void @vmv_vsvmvl_imm(ptr %0) {
 ; CHECK-LABEL: vmv_vsvmvl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -125,8 +125,8 @@ define void @vmv_vsvmvl_imm(i8* %0) {
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32 31, <256 x double> %2, <256 x i1> undef, <256 x double> %2, i32 128)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, i8* %0, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, ptr %0, i32 256)
   ret void
 }
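
For reference, a minimal standalone sketch (not part of this commit) of the
converted form of the vld/vst intrinsic calls exercised above; the function
name @roundtrip is invented for illustration, while the intrinsic declarations
are copied verbatim from the tests:

  ; Opaque-pointer form: each former i8* argument is now a plain ptr.
  declare <256 x double> @llvm.ve.vl.vld.vssl(i64, ptr, i32)
  declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, ptr, i32)

  define void @roundtrip(ptr %0) {
    ; Load 256 elements with stride 8 from %0, then store them back.
    %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
    tail call void @llvm.ve.vl.vst.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
    ret void
  }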

diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vst.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vst.ll
index 9839271147f73..d4d4744e3134e 100644
--- a/llvm/test/CodeGen/VE/VELIntrinsics/vst.ll
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vst.ll
@@ -6,7 +6,7 @@
 ;;;   We test VST*rrvl, VST*rrvml, VST*irvl, and VST*irvml instructions.
 
 ; Function Attrs: nounwind
-define void @vst_vssl(i8* %0, i64 %1) {
+define void @vst_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vst_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -14,19 +14,19 @@ define void @vst_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)
+declare <256 x double> @llvm.ve.vl.vld.vssl(i64, ptr, i32)
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vst_vssml(i8* %0, i64 %1) {
+define void @vst_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vst_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -34,16 +34,16 @@ define void @vst_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vst.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vst.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vst_vssl_imm(i8* %0) {
+define void @vst_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vst_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -51,13 +51,13 @@ define void @vst_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vst_vssml_imm(i8* %0) {
+define void @vst_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vst_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -65,13 +65,13 @@ define void @vst_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstnc_vssl(i8* %0, i64 %1) {
+define void @vstnc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstnc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -79,16 +79,16 @@ define void @vstnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst.nc %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstnc.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstnc.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstnc.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstnc.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstnc_vssml(i8* %0, i64 %1) {
+define void @vstnc_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstnc_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -96,16 +96,16 @@ define void @vstnc_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst.nc %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstnc.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstnc.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstnc.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstnc.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstnc_vssl_imm(i8* %0) {
+define void @vstnc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstnc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -113,13 +113,13 @@ define void @vstnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst.nc %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstnc.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstnc.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstnc_vssml_imm(i8* %0) {
+define void @vstnc_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstnc_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -127,13 +127,13 @@ define void @vstnc_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst.nc %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstnc.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstnc.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstot_vssl(i8* %0, i64 %1) {
+define void @vstot_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstot_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -141,16 +141,16 @@ define void @vstot_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst.ot %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstot.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstot.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstot.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstot_vssml(i8* %0, i64 %1) {
+define void @vstot_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstot_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -158,16 +158,16 @@ define void @vstot_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst.ot %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstot.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstot.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstot_vssl_imm(i8* %0) {
+define void @vstot_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstot_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -175,13 +175,13 @@ define void @vstot_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst.ot %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstot.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstot_vssml_imm(i8* %0) {
+define void @vstot_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstot_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -189,13 +189,13 @@ define void @vstot_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst.ot %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstot.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstncot_vssl(i8* %0, i64 %1) {
+define void @vstncot_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstncot_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -203,16 +203,16 @@ define void @vstncot_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst.nc.ot %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstncot.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstncot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstncot.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstncot.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstncot_vssml(i8* %0, i64 %1) {
+define void @vstncot_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstncot_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -220,16 +220,16 @@ define void @vstncot_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst.nc.ot %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstncot.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstncot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstncot.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstncot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstncot_vssl_imm(i8* %0) {
+define void @vstncot_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstncot_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -237,13 +237,13 @@ define void @vstncot_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst.nc.ot %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstncot.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstncot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstncot_vssml_imm(i8* %0) {
+define void @vstncot_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstncot_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -251,13 +251,13 @@ define void @vstncot_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst.nc.ot %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstncot.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstncot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstu_vssl(i8* %0, i64 %1) {
+define void @vstu_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstu_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -265,16 +265,16 @@ define void @vstu_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstu.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstu.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstu_vssml(i8* %0, i64 %1) {
+define void @vstu_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstu_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -282,16 +282,16 @@ define void @vstu_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstu.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstu.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstu_vssl_imm(i8* %0) {
+define void @vstu_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstu_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -299,13 +299,13 @@ define void @vstu_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstu_vssml_imm(i8* %0) {
+define void @vstu_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstu_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -313,13 +313,13 @@ define void @vstu_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstunc_vssl(i8* %0, i64 %1) {
+define void @vstunc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstunc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -327,16 +327,16 @@ define void @vstunc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu.nc %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstunc.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstunc.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstunc.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstunc.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstunc_vssml(i8* %0, i64 %1) {
+define void @vstunc_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstunc_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -344,16 +344,16 @@ define void @vstunc_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu.nc %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstunc.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstunc.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstunc.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstunc.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstunc_vssl_imm(i8* %0) {
+define void @vstunc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstunc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -361,13 +361,13 @@ define void @vstunc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu.nc %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstunc.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstunc.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstunc_vssml_imm(i8* %0) {
+define void @vstunc_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstunc_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -375,13 +375,13 @@ define void @vstunc_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu.nc %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstunc.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstunc.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstuot_vssl(i8* %0, i64 %1) {
+define void @vstuot_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstuot_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -389,16 +389,16 @@ define void @vstuot_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu.ot %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstuot.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstuot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstuot.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstuot.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstuot_vssml(i8* %0, i64 %1) {
+define void @vstuot_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstuot_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -406,16 +406,16 @@ define void @vstuot_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu.ot %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstuot.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstuot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstuot.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstuot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstuot_vssl_imm(i8* %0) {
+define void @vstuot_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstuot_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -423,13 +423,13 @@ define void @vstuot_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu.ot %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstuot.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstuot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstuot_vssml_imm(i8* %0) {
+define void @vstuot_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstuot_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -437,13 +437,13 @@ define void @vstuot_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu.ot %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstuot.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstuot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstuncot_vssl(i8* %0, i64 %1) {
+define void @vstuncot_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstuncot_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -451,16 +451,16 @@ define void @vstuncot_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu.nc.ot %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstuncot.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstuncot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstuncot.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstuncot.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstuncot_vssml(i8* %0, i64 %1) {
+define void @vstuncot_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstuncot_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -468,16 +468,16 @@ define void @vstuncot_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu.nc.ot %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstuncot.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstuncot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstuncot.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstuncot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstuncot_vssl_imm(i8* %0) {
+define void @vstuncot_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstuncot_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -485,13 +485,13 @@ define void @vstuncot_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu.nc.ot %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstuncot.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstuncot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstuncot_vssml_imm(i8* %0) {
+define void @vstuncot_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstuncot_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -499,13 +499,13 @@ define void @vstuncot_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu.nc.ot %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstuncot.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstuncot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstl_vssl(i8* %0, i64 %1) {
+define void @vstl_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstl_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -513,16 +513,16 @@ define void @vstl_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstl.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstl.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstl_vssml(i8* %0, i64 %1) {
+define void @vstl_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstl_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -530,16 +530,16 @@ define void @vstl_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstl.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstl.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstl_vssl_imm(i8* %0) {
+define void @vstl_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstl_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -547,13 +547,13 @@ define void @vstl_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstl_vssml_imm(i8* %0) {
+define void @vstl_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstl_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -561,13 +561,13 @@ define void @vstl_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstlnc_vssl(i8* %0, i64 %1) {
+define void @vstlnc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstlnc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -575,16 +575,16 @@ define void @vstlnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl.nc %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstlnc.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstlnc.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstlnc.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstlnc.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstlnc_vssml(i8* %0, i64 %1) {
+define void @vstlnc_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstlnc_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -592,16 +592,16 @@ define void @vstlnc_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl.nc %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstlnc.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstlnc.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstlnc.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstlnc.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstlnc_vssl_imm(i8* %0) {
+define void @vstlnc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstlnc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -609,13 +609,13 @@ define void @vstlnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl.nc %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstlnc.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstlnc.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstlnc_vssml_imm(i8* %0) {
+define void @vstlnc_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstlnc_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -623,13 +623,13 @@ define void @vstlnc_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl.nc %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstlnc.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstlnc.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstlot_vssl(i8* %0, i64 %1) {
+define void @vstlot_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstlot_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -637,16 +637,16 @@ define void @vstlot_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl.ot %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstlot.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstlot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstlot.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstlot.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstlot_vssml(i8* %0, i64 %1) {
+define void @vstlot_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstlot_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -654,16 +654,16 @@ define void @vstlot_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl.ot %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstlot.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstlot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstlot.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstlot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstlot_vssl_imm(i8* %0) {
+define void @vstlot_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstlot_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -671,13 +671,13 @@ define void @vstlot_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl.ot %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstlot.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstlot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstlot_vssml_imm(i8* %0) {
+define void @vstlot_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstlot_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -685,13 +685,13 @@ define void @vstlot_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl.ot %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstlot.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstlot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstlncot_vssl(i8* %0, i64 %1) {
+define void @vstlncot_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstlncot_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -699,16 +699,16 @@ define void @vstlncot_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl.nc.ot %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstlncot.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstlncot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstlncot.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstlncot.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstlncot_vssml(i8* %0, i64 %1) {
+define void @vstlncot_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstlncot_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -716,16 +716,16 @@ define void @vstlncot_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl.nc.ot %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstlncot.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstlncot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstlncot.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstlncot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstlncot_vssl_imm(i8* %0) {
+define void @vstlncot_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstlncot_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -733,13 +733,13 @@ define void @vstlncot_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl.nc.ot %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstlncot.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstlncot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstlncot_vssml_imm(i8* %0) {
+define void @vstlncot_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstlncot_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -747,13 +747,13 @@ define void @vstlncot_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl.nc.ot %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstlncot.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstlncot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vst2d_vssl(i8* %0, i64 %1) {
+define void @vst2d_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vst2d_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -761,16 +761,16 @@ define void @vst2d_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst2d %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2d.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2d.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vst2d.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vst2d.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vst2d_vssml(i8* %0, i64 %1) {
+define void @vst2d_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vst2d_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -778,16 +778,16 @@ define void @vst2d_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst2d %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2d.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2d.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vst2d.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vst2d.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vst2d_vssl_imm(i8* %0) {
+define void @vst2d_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vst2d_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -795,13 +795,13 @@ define void @vst2d_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst2d %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2d.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2d.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vst2d_vssml_imm(i8* %0) {
+define void @vst2d_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vst2d_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -809,13 +809,13 @@ define void @vst2d_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst2d %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2d.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2d.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vst2dnc_vssl(i8* %0, i64 %1) {
+define void @vst2dnc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vst2dnc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -823,16 +823,16 @@ define void @vst2dnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst2d.nc %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2dnc.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2dnc.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vst2dnc.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vst2dnc.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vst2dnc_vssml(i8* %0, i64 %1) {
+define void @vst2dnc_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vst2dnc_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -840,16 +840,16 @@ define void @vst2dnc_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst2d.nc %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2dnc.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2dnc.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vst2dnc.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vst2dnc.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vst2dnc_vssl_imm(i8* %0) {
+define void @vst2dnc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vst2dnc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -857,13 +857,13 @@ define void @vst2dnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst2d.nc %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2dnc.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2dnc.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vst2dnc_vssml_imm(i8* %0) {
+define void @vst2dnc_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vst2dnc_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -871,13 +871,13 @@ define void @vst2dnc_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst2d.nc %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2dnc.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2dnc.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vst2dot_vssl(i8* %0, i64 %1) {
+define void @vst2dot_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vst2dot_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -885,16 +885,16 @@ define void @vst2dot_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst2d.ot %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2dot.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2dot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vst2dot.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vst2dot.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vst2dot_vssml(i8* %0, i64 %1) {
+define void @vst2dot_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vst2dot_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -902,16 +902,16 @@ define void @vst2dot_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst2d.ot %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2dot.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2dot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vst2dot.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vst2dot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vst2dot_vssl_imm(i8* %0) {
+define void @vst2dot_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vst2dot_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -919,13 +919,13 @@ define void @vst2dot_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst2d.ot %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2dot.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2dot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vst2dot_vssml_imm(i8* %0) {
+define void @vst2dot_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vst2dot_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -933,13 +933,13 @@ define void @vst2dot_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst2d.ot %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2dot.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2dot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vst2dncot_vssl(i8* %0, i64 %1) {
+define void @vst2dncot_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vst2dncot_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -947,16 +947,16 @@ define void @vst2dncot_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst2d.nc.ot %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2dncot.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2dncot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vst2dncot.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vst2dncot.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vst2dncot_vssml(i8* %0, i64 %1) {
+define void @vst2dncot_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vst2dncot_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -964,16 +964,16 @@ define void @vst2dncot_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vst2d.nc.ot %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2dncot.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2dncot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vst2dncot.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vst2dncot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vst2dncot_vssl_imm(i8* %0) {
+define void @vst2dncot_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vst2dncot_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -981,13 +981,13 @@ define void @vst2dncot_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst2d.nc.ot %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2dncot.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2dncot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vst2dncot_vssml_imm(i8* %0) {
+define void @vst2dncot_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vst2dncot_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -995,13 +995,13 @@ define void @vst2dncot_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vst2d.nc.ot %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vst2dncot.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vst2dncot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstu2d_vssl(i8* %0, i64 %1) {
+define void @vstu2d_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstu2d_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1009,16 +1009,16 @@ define void @vstu2d_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu2d %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2d.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2d.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstu2d.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstu2d.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstu2d_vssml(i8* %0, i64 %1) {
+define void @vstu2d_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstu2d_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1026,16 +1026,16 @@ define void @vstu2d_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu2d %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2d.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2d.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstu2d.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstu2d.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstu2d_vssl_imm(i8* %0) {
+define void @vstu2d_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstu2d_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1043,13 +1043,13 @@ define void @vstu2d_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu2d %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2d.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2d.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstu2d_vssml_imm(i8* %0) {
+define void @vstu2d_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstu2d_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1057,13 +1057,13 @@ define void @vstu2d_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu2d %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2d.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2d.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstu2dnc_vssl(i8* %0, i64 %1) {
+define void @vstu2dnc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstu2dnc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1071,16 +1071,16 @@ define void @vstu2dnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu2d.nc %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2dnc.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2dnc.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstu2dnc.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstu2dnc.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstu2dnc_vssml(i8* %0, i64 %1) {
+define void @vstu2dnc_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstu2dnc_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1088,16 +1088,16 @@ define void @vstu2dnc_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu2d.nc %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2dnc.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2dnc.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstu2dnc.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstu2dnc.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstu2dnc_vssl_imm(i8* %0) {
+define void @vstu2dnc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstu2dnc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1105,13 +1105,13 @@ define void @vstu2dnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu2d.nc %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2dnc.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2dnc.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstu2dnc_vssml_imm(i8* %0) {
+define void @vstu2dnc_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstu2dnc_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1119,13 +1119,13 @@ define void @vstu2dnc_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu2d.nc %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2dnc.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2dnc.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstu2dot_vssl(i8* %0, i64 %1) {
+define void @vstu2dot_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstu2dot_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1133,16 +1133,16 @@ define void @vstu2dot_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu2d.ot %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2dot.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2dot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstu2dot.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstu2dot.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstu2dot_vssml(i8* %0, i64 %1) {
+define void @vstu2dot_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstu2dot_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1150,16 +1150,16 @@ define void @vstu2dot_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu2d.ot %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2dot.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2dot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstu2dot.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstu2dot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstu2dot_vssl_imm(i8* %0) {
+define void @vstu2dot_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstu2dot_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1167,13 +1167,13 @@ define void @vstu2dot_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu2d.ot %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2dot.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2dot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstu2dot_vssml_imm(i8* %0) {
+define void @vstu2dot_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstu2dot_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1181,13 +1181,13 @@ define void @vstu2dot_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu2d.ot %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2dot.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2dot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstu2dncot_vssl(i8* %0, i64 %1) {
+define void @vstu2dncot_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstu2dncot_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1195,16 +1195,16 @@ define void @vstu2dncot_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu2d.nc.ot %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2dncot.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2dncot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstu2dncot.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstu2dncot.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstu2dncot_vssml(i8* %0, i64 %1) {
+define void @vstu2dncot_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstu2dncot_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1212,16 +1212,16 @@ define void @vstu2dncot_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstu2d.nc.ot %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2dncot.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2dncot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstu2dncot.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstu2dncot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstu2dncot_vssl_imm(i8* %0) {
+define void @vstu2dncot_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstu2dncot_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1229,13 +1229,13 @@ define void @vstu2dncot_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu2d.nc.ot %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2dncot.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2dncot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstu2dncot_vssml_imm(i8* %0) {
+define void @vstu2dncot_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstu2dncot_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1243,13 +1243,13 @@ define void @vstu2dncot_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstu2d.nc.ot %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstu2dncot.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstu2dncot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstl2d_vssl(i8* %0, i64 %1) {
+define void @vstl2d_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstl2d_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1257,16 +1257,16 @@ define void @vstl2d_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl2d %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2d.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2d.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstl2d.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstl2d.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstl2d_vssml(i8* %0, i64 %1) {
+define void @vstl2d_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstl2d_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1274,16 +1274,16 @@ define void @vstl2d_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl2d %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2d.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2d.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstl2d.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstl2d.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstl2d_vssl_imm(i8* %0) {
+define void @vstl2d_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstl2d_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1291,13 +1291,13 @@ define void @vstl2d_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl2d %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2d.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2d.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstl2d_vssml_imm(i8* %0) {
+define void @vstl2d_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstl2d_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1305,13 +1305,13 @@ define void @vstl2d_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl2d %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2d.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2d.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstl2dnc_vssl(i8* %0, i64 %1) {
+define void @vstl2dnc_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstl2dnc_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1319,16 +1319,16 @@ define void @vstl2dnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl2d.nc %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2dnc.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2dnc.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstl2dnc.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstl2dnc.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstl2dnc_vssml(i8* %0, i64 %1) {
+define void @vstl2dnc_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstl2dnc_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1336,16 +1336,16 @@ define void @vstl2dnc_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl2d.nc %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2dnc.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2dnc.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstl2dnc.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstl2dnc.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstl2dnc_vssl_imm(i8* %0) {
+define void @vstl2dnc_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstl2dnc_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1353,13 +1353,13 @@ define void @vstl2dnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl2d.nc %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2dnc.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2dnc.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstl2dnc_vssml_imm(i8* %0) {
+define void @vstl2dnc_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstl2dnc_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1367,13 +1367,13 @@ define void @vstl2dnc_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl2d.nc %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2dnc.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2dnc.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstl2dot_vssl(i8* %0, i64 %1) {
+define void @vstl2dot_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstl2dot_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1381,16 +1381,16 @@ define void @vstl2dot_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl2d.ot %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2dot.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2dot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstl2dot.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstl2dot.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstl2dot_vssml(i8* %0, i64 %1) {
+define void @vstl2dot_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstl2dot_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1398,16 +1398,16 @@ define void @vstl2dot_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl2d.ot %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2dot.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2dot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstl2dot.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstl2dot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstl2dot_vssl_imm(i8* %0) {
+define void @vstl2dot_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstl2dot_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1415,13 +1415,13 @@ define void @vstl2dot_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl2d.ot %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2dot.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2dot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstl2dot_vssml_imm(i8* %0) {
+define void @vstl2dot_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstl2dot_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1429,13 +1429,13 @@ define void @vstl2dot_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl2d.ot %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2dot.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2dot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstl2dncot_vssl(i8* %0, i64 %1) {
+define void @vstl2dncot_vssl(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstl2dncot_vssl:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1443,16 +1443,16 @@ define void @vstl2dncot_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl2d.nc.ot %v0, %s1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2dncot.vssl(<256 x double> %3, i64 %1, i8* %0, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2dncot.vssl(<256 x double> %3, i64 %1, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstl2dncot.vssl(<256 x double>, i64, i8*, i32)
+declare void @llvm.ve.vl.vstl2dncot.vssl(<256 x double>, i64, ptr, i32)
 
 ; Function Attrs: nounwind
-define void @vstl2dncot_vssml(i8* %0, i64 %1) {
+define void @vstl2dncot_vssml(ptr %0, i64 %1) {
 ; CHECK-LABEL: vstl2dncot_vssml:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s2, 256
@@ -1460,16 +1460,16 @@ define void @vstl2dncot_vssml(i8* %0, i64 %1) {
 ; CHECK-NEXT:    vld %v0, %s1, %s0
 ; CHECK-NEXT:    vstl2d.nc.ot %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2dncot.vssml(<256 x double> %3, i64 %1, i8* %0, <256 x i1> undef, i32 256)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2dncot.vssml(<256 x double> %3, i64 %1, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind writeonly
-declare void @llvm.ve.vl.vstl2dncot.vssml(<256 x double>, i64, i8*, <256 x i1>, i32)
+declare void @llvm.ve.vl.vstl2dncot.vssml(<256 x double>, i64, ptr, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define void @vstl2dncot_vssl_imm(i8* %0) {
+define void @vstl2dncot_vssl_imm(ptr %0) {
 ; CHECK-LABEL: vstl2dncot_vssl_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1477,13 +1477,13 @@ define void @vstl2dncot_vssl_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl2d.nc.ot %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2dncot.vssl(<256 x double> %2, i64 8, i8* %0, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2dncot.vssl(<256 x double> %2, i64 8, ptr %0, i32 256)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @vstl2dncot_vssml_imm(i8* %0) {
+define void @vstl2dncot_vssml_imm(ptr %0) {
 ; CHECK-LABEL: vstl2dncot_vssml_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -1491,7 +1491,7 @@ define void @vstl2dncot_vssml_imm(i8* %0) {
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    vstl2d.nc.ot %v0, 8, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void @llvm.ve.vl.vstl2dncot.vssml(<256 x double> %2, i64 8, i8* %0, <256 x i1> undef, i32 256)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
+  tail call void @llvm.ve.vl.vstl2dncot.vssml(<256 x double> %2, i64 8, ptr %0, <256 x i1> undef, i32 256)
   ret void
 }

diff --git a/llvm/test/CodeGen/VE/Vector/loadvm.ll b/llvm/test/CodeGen/VE/Vector/loadvm.ll
index 8d82821fc09b2..4cd21ba20000a 100644
--- a/llvm/test/CodeGen/VE/Vector/loadvm.ll
+++ b/llvm/test/CodeGen/VE/Vector/loadvm.ll
@@ -5,7 +5,7 @@
 @v512i1 = common dso_local local_unnamed_addr global <512 x i1> zeroinitializer, align 4
 
 ; Function Attrs: norecurse nounwind readonly
-define fastcc <256 x i1> @loadv256i1(<256 x i1>* nocapture readonly %mp) {
+define fastcc <256 x i1> @loadv256i1(ptr nocapture readonly %mp) {
 ; CHECK-LABEL: loadv256i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld %s1, (, %s0)
@@ -17,7 +17,7 @@ define fastcc <256 x i1> @loadv256i1(<256 x i1>* nocapture readonly %mp) {
 ; CHECK-NEXT:    lvm %vm1, 2, %s3
 ; CHECK-NEXT:    lvm %vm1, 3, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %m = load <256 x i1>, <256 x i1>* %mp, align 16
+  %m = load <256 x i1>, ptr %mp, align 16
   ret <256 x i1> %m
 }
 
@@ -37,12 +37,12 @@ define fastcc <256 x i1> @loadv256i1com() {
 ; CHECK-NEXT:    lvm %vm1, 2, %s3
 ; CHECK-NEXT:    lvm %vm1, 3, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %m = load <256 x i1>, <256 x i1>* @v256i1, align 16
+  %m = load <256 x i1>, ptr @v256i1, align 16
   ret <256 x i1> %m
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define fastcc <512 x i1> @loadv512i1(<512 x i1>* nocapture readonly %mp) {
+define fastcc <512 x i1> @loadv512i1(ptr nocapture readonly %mp) {
 ; CHECK-LABEL: loadv512i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld %s1, (, %s0)
@@ -62,7 +62,7 @@ define fastcc <512 x i1> @loadv512i1(<512 x i1>* nocapture readonly %mp) {
 ; CHECK-NEXT:    lvm %vm2, 2, %s3
 ; CHECK-NEXT:    lvm %vm2, 3, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %m = load <512 x i1>, <512 x i1>* %mp, align 16
+  %m = load <512 x i1>, ptr %mp, align 16
   ret <512 x i1> %m
 }
 
@@ -90,7 +90,7 @@ define fastcc <512 x i1> @loadv512i1com() {
 ; CHECK-NEXT:    lvm %vm2, 2, %s3
 ; CHECK-NEXT:    lvm %vm2, 3, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %m = load <512 x i1>, <512 x i1>* @v512i1, align 16
+  %m = load <512 x i1>, ptr @v512i1, align 16
   ret <512 x i1> %m
 }
 

diff --git a/llvm/test/CodeGen/VE/Vector/loadvr.ll b/llvm/test/CodeGen/VE/Vector/loadvr.ll
index d26e014027edf..a7ab9fba7b845 100644
--- a/llvm/test/CodeGen/VE/Vector/loadvr.ll
+++ b/llvm/test/CodeGen/VE/Vector/loadvr.ll
@@ -4,50 +4,50 @@
 @v256i64 = common dso_local local_unnamed_addr global <256 x i64> zeroinitializer, align 16
 
 ; Function Attrs: norecurse nounwind readonly
-define fastcc <256 x i64> @loadv256i64(<256 x i64>* nocapture readonly) {
+define fastcc <256 x i64> @loadv256i64(ptr nocapture readonly) {
 ; CHECK-LABEL: loadv256i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load <256 x i64>, <256 x i64>* %0, align 16
+  %2 = load <256 x i64>, ptr %0, align 16
   ret <256 x i64> %2
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define fastcc <256 x double> @loadv256f64(<256 x double>* nocapture readonly) {
+define fastcc <256 x double> @loadv256f64(ptr nocapture readonly) {
 ; CHECK-LABEL: loadv256f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load <256 x double>, <256 x double>* %0, align 16
+  %2 = load <256 x double>, ptr %0, align 16
   ret <256 x double> %2
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define fastcc <256 x i32> @loadv256i32(<256 x i32>* nocapture readonly) {
+define fastcc <256 x i32> @loadv256i32(ptr nocapture readonly) {
 ; CHECK-LABEL: loadv256i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    vldl.zx %v0, 4, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load <256 x i32>, <256 x i32>* %0, align 16
+  %2 = load <256 x i32>, ptr %0, align 16
   ret <256 x i32> %2
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define fastcc <256 x float> @loadv256f32(<256 x float>* nocapture readonly) {
+define fastcc <256 x float> @loadv256f32(ptr nocapture readonly) {
 ; CHECK-LABEL: loadv256f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    vldu %v0, 4, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %2 = load <256 x float>, <256 x float>* %0, align 16
+  %2 = load <256 x float>, ptr %0, align 16
   ret <256 x float> %2
 }
 
@@ -74,7 +74,7 @@ define fastcc <256 x i64> @loadv256i64stk() {
 ; CHECK-NEXT:    lea %s11, 2048(, %s11)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca <256 x i64>, align 16
-  %1 = load <256 x i64>, <256 x i64>* %addr, align 16
+  %1 = load <256 x i64>, ptr %addr, align 16
   ret <256 x i64> %1
 }
 
@@ -89,6 +89,6 @@ define fastcc <256 x i64> @loadv256i64com() {
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %1 = load <256 x i64>, <256 x i64>* @v256i64, align 16
+  %1 = load <256 x i64>, ptr @v256i64, align 16
   ret <256 x i64> %1
 }

diff --git a/llvm/test/CodeGen/VE/Vector/storevm.ll b/llvm/test/CodeGen/VE/Vector/storevm.ll
index 448dc2d4bd254..51e688b4ca36d 100644
--- a/llvm/test/CodeGen/VE/Vector/storevm.ll
+++ b/llvm/test/CodeGen/VE/Vector/storevm.ll
@@ -5,7 +5,7 @@
 @v512i1 = common dso_local local_unnamed_addr global <512 x i1> zeroinitializer, align 4
 
 ; Function Attrs: norecurse nounwind readonly
-define fastcc void @storev256i1(<256 x i1>* nocapture %mp, <256 x i1> %m) {
+define fastcc void @storev256i1(ptr nocapture %mp, <256 x i1> %m) {
 ; CHECK-LABEL: storev256i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    svm %s1, %vm1, 3
@@ -17,7 +17,7 @@ define fastcc void @storev256i1(<256 x i1>* nocapture %mp, <256 x i1> %m) {
 ; CHECK-NEXT:    svm %s1, %vm1, 0
 ; CHECK-NEXT:    st %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store <256 x i1> %m, <256 x i1>* %mp, align 16
+  store <256 x i1> %m, ptr %mp, align 16
   ret void
 }
 
@@ -37,12 +37,12 @@ define fastcc void @storev256i1com(<256 x i1> %m) {
 ; CHECK-NEXT:    svm %s0, %vm1, 0
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store <256 x i1> %m, <256 x i1>* @v256i1, align 16
+  store <256 x i1> %m, ptr @v256i1, align 16
   ret void
 }
 
 ; Function Attrs: norecurse nounwind readonly
-define fastcc void @storev512i1(<512 x i1>* nocapture %mp, <512 x i1> %m) {
+define fastcc void @storev512i1(ptr nocapture %mp, <512 x i1> %m) {
 ; CHECK-LABEL: storev512i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    svm %s1, %vm2, 3
@@ -62,7 +62,7 @@ define fastcc void @storev512i1(<512 x i1>* nocapture %mp, <512 x i1> %m) {
 ; CHECK-NEXT:    svm %s1, %vm3, 0
 ; CHECK-NEXT:    st %s1, (, %s0)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store <512 x i1> %m, <512 x i1>* %mp, align 16
+  store <512 x i1> %m, ptr %mp, align 16
   ret void
 }
 
@@ -90,6 +90,6 @@ define fastcc void @storev512i1com(<512 x i1> %m) {
 ; CHECK-NEXT:    svm %s0, %vm3, 0
 ; CHECK-NEXT:    st %s0, (, %s1)
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store <512 x i1> %m, <512 x i1>* @v512i1, align 16
+  store <512 x i1> %m, ptr @v512i1, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/VE/Vector/storevr.ll b/llvm/test/CodeGen/VE/Vector/storevr.ll
index 5b8e3effa4321..d5607f04a7830 100644
--- a/llvm/test/CodeGen/VE/Vector/storevr.ll
+++ b/llvm/test/CodeGen/VE/Vector/storevr.ll
@@ -4,14 +4,14 @@
 @v256i64 = common dso_local local_unnamed_addr global <256 x i64> zeroinitializer, align 16
 
 ; Function Attrs: norecurse nounwind readonly
-define fastcc void @storev256i64(<256 x i64>* nocapture, <256 x i64>) {
+define fastcc void @storev256i64(ptr nocapture, <256 x i64>) {
 ; CHECK-LABEL: storev256i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store <256 x i64> %1, <256 x i64>* %0, align 16
+  store <256 x i64> %1, ptr %0, align 16
   ret void
 }
 
@@ -38,7 +38,7 @@ define fastcc void @storev256i64stk(<256 x i64>) {
 ; CHECK-NEXT:    lea %s11, 2048(, %s11)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %addr = alloca <256 x i64>, align 16
-  store <256 x i64> %0, <256 x i64>* %addr, align 16
+  store <256 x i64> %0, ptr %addr, align 16
   ret void
 }
 
@@ -53,6 +53,6 @@ define fastcc void @storev256i64com(<256 x i64>) {
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  store <256 x i64> %0, <256 x i64>* @v256i64, align 16
+  store <256 x i64> %0, ptr @v256i64, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/VE/Vector/vec_gather.ll b/llvm/test/CodeGen/VE/Vector/vec_gather.ll
index e215c8e427878..f22722dcc0203 100644
--- a/llvm/test/CodeGen/VE/Vector/vec_gather.ll
+++ b/llvm/test/CodeGen/VE/Vector/vec_gather.ll
@@ -1,22 +1,22 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=ve-unknown-unknown -mattr=+vpu | FileCheck %s
 
-declare <256 x double> @llvm.masked.gather.v256f64.v256p0f64(<256 x double*> %0, i32 immarg %1, <256 x i1> %2, <256 x double> %3) #0
+declare <256 x double> @llvm.masked.gather.v256f64.v256p0(<256 x ptr> %0, i32 immarg %1, <256 x i1> %2, <256 x double> %3) #0
 
 ; Function Attrs: nounwind
-define fastcc <256 x double> @vec_mgather_v256f64(<256 x double*> %P, <256 x i1> %M) {
+define fastcc <256 x double> @vec_mgather_v256f64(<256 x ptr> %P, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mgather_v256f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vgt %v0, %v0, 0, 0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x double> @llvm.masked.gather.v256f64.v256p0f64(<256 x double*> %P, i32 4, <256 x i1> %M, <256 x double> undef)
+  %r = call <256 x double> @llvm.masked.gather.v256f64.v256p0(<256 x ptr> %P, i32 4, <256 x i1> %M, <256 x double> undef)
   ret <256 x double> %r
 }
 
 ; Function Attrs: nounwind
-define fastcc <256 x double> @vec_mgather_pt_v256f64(<256 x double*> %P, <256 x double> %PT, <256 x i1> %M) {
+define fastcc <256 x double> @vec_mgather_pt_v256f64(<256 x ptr> %P, <256 x double> %PT, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mgather_pt_v256f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
@@ -28,27 +28,27 @@ define fastcc <256 x double> @vec_mgather_pt_v256f64(<256 x double*> %P, <256 x
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vmrg %v0, %v1, %v2, %vm0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x double> @llvm.masked.gather.v256f64.v256p0f64(<256 x double*> %P, i32 4, <256 x i1> %M, <256 x double> %PT)
+  %r = call <256 x double> @llvm.masked.gather.v256f64.v256p0(<256 x ptr> %P, i32 4, <256 x i1> %M, <256 x double> %PT)
   ret <256 x double> %r
 }
 
 
-declare <256 x float> @llvm.masked.gather.v256f32.v256p0f32(<256 x float*> %0, i32 immarg %1, <256 x i1> %2, <256 x float> %3) #0
+declare <256 x float> @llvm.masked.gather.v256f32.v256p0(<256 x ptr> %0, i32 immarg %1, <256 x i1> %2, <256 x float> %3) #0
 
 ; Function Attrs: nounwind
-define fastcc <256 x float> @vec_mgather_v256f32(<256 x float*> %P, <256 x i1> %M) {
+define fastcc <256 x float> @vec_mgather_v256f32(<256 x ptr> %P, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mgather_v256f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vgtu %v0, %v0, 0, 0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x float> @llvm.masked.gather.v256f32.v256p0f32(<256 x float*> %P, i32 4, <256 x i1> %M, <256 x float> undef)
+  %r = call <256 x float> @llvm.masked.gather.v256f32.v256p0(<256 x ptr> %P, i32 4, <256 x i1> %M, <256 x float> undef)
   ret <256 x float> %r
 }
 
 ; Function Attrs: nounwind
-define fastcc <256 x float> @vec_mgather_pt_v256f32(<256 x float*> %P, <256 x float> %PT, <256 x i1> %M) {
+define fastcc <256 x float> @vec_mgather_pt_v256f32(<256 x ptr> %P, <256 x float> %PT, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mgather_pt_v256f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
@@ -60,27 +60,27 @@ define fastcc <256 x float> @vec_mgather_pt_v256f32(<256 x float*> %P, <256 x fl
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vmrg %v0, %v1, %v2, %vm0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x float> @llvm.masked.gather.v256f32.v256p0f32(<256 x float*> %P, i32 4, <256 x i1> %M, <256 x float> %PT)
+  %r = call <256 x float> @llvm.masked.gather.v256f32.v256p0(<256 x ptr> %P, i32 4, <256 x i1> %M, <256 x float> %PT)
   ret <256 x float> %r
 }
 
 
-declare <256 x i32> @llvm.masked.gather.v256i32.v256p0i32(<256 x i32*> %0, i32 immarg %1, <256 x i1> %2, <256 x i32> %3) #0
+declare <256 x i32> @llvm.masked.gather.v256i32.v256p0(<256 x ptr> %0, i32 immarg %1, <256 x i1> %2, <256 x i32> %3) #0
 
 ; Function Attrs: nounwind
-define fastcc <256 x i32> @vec_mgather_v256i32(<256 x i32*> %P, <256 x i1> %M) {
+define fastcc <256 x i32> @vec_mgather_v256i32(<256 x ptr> %P, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mgather_v256i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vgtl.zx %v0, %v0, 0, 0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x i32> @llvm.masked.gather.v256i32.v256p0i32(<256 x i32*> %P, i32 4, <256 x i1> %M, <256 x i32> undef)
+  %r = call <256 x i32> @llvm.masked.gather.v256i32.v256p0(<256 x ptr> %P, i32 4, <256 x i1> %M, <256 x i32> undef)
   ret <256 x i32> %r
 }
 
 ; Function Attrs: nounwind
-define fastcc <256 x i32> @vec_mgather_pt_v256i32(<256 x i32*> %P, <256 x i32> %PT, <256 x i1> %M) {
+define fastcc <256 x i32> @vec_mgather_pt_v256i32(<256 x ptr> %P, <256 x i32> %PT, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mgather_pt_v256i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
@@ -92,7 +92,7 @@ define fastcc <256 x i32> @vec_mgather_pt_v256i32(<256 x i32*> %P, <256 x i32> %
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vmrg %v0, %v1, %v2, %vm0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x i32> @llvm.masked.gather.v256i32.v256p0i32(<256 x i32*> %P, i32 4, <256 x i1> %M, <256 x i32> %PT)
+  %r = call <256 x i32> @llvm.masked.gather.v256i32.v256p0(<256 x ptr> %P, i32 4, <256 x i1> %M, <256 x i32> %PT)
   ret <256 x i32> %r
 }
 

diff --git a/llvm/test/CodeGen/VE/Vector/vec_load.ll b/llvm/test/CodeGen/VE/Vector/vec_load.ll
index 69af962b33b40..d72c1cccbbdc3 100644
--- a/llvm/test/CodeGen/VE/Vector/vec_load.ll
+++ b/llvm/test/CodeGen/VE/Vector/vec_load.ll
@@ -1,20 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=ve-unknown-unknown -mattr=+vpu | FileCheck %s
 
-declare <128 x double> @llvm.masked.load.v128f64.p0v128f64(<128 x double>* %0, i32 immarg %1, <128 x i1> %2, <128 x double> %3) #0
+declare <128 x double> @llvm.masked.load.v128f64.p0(ptr %0, i32 immarg %1, <128 x i1> %2, <128 x double> %3) #0
 
 ; TODO: Custom widen by lowering to vvp_load in ReplaceNodeResult
 ; Function Attrs: nounwind
-; define fastcc <128 x double> @vec_mload_v128f64(<128 x double>* %P, <128 x i1> %M) {
-;   %r = call <128 x double> @llvm.masked.load.v128f64.p0v128f64(<128 x double>* %P, i32 16, <128 x i1> %M, <128 x double> undef)
+; define fastcc <128 x double> @vec_mload_v128f64(ptr %P, <128 x i1> %M) {
+;   %r = call <128 x double> @llvm.masked.load.v128f64.p0(ptr %P, i32 16, <128 x i1> %M, <128 x double> undef)
 ;   ret <128 x double> %r
 ; }
 
 
-declare <256 x double> @llvm.masked.load.v256f64.p0v256f64(<256 x double>* %0, i32 immarg %1, <256 x i1> %2, <256 x double> %3) #0
+declare <256 x double> @llvm.masked.load.v256f64.p0(ptr %0, i32 immarg %1, <256 x i1> %2, <256 x double> %3) #0
 
 ; Function Attrs: nounwind
-define fastcc <256 x double> @vec_mload_v256f64(<256 x double>* %P, <256 x i1> %M) {
+define fastcc <256 x double> @vec_mload_v256f64(ptr %P, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mload_v256f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -24,24 +24,24 @@ define fastcc <256 x double> @vec_mload_v256f64(<256 x double>* %P, <256 x i1> %
 ; CHECK-NEXT:    vaddu.l %v0, %s0, %v0, %vm1
 ; CHECK-NEXT:    vgt %v0, %v0, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x double> @llvm.masked.load.v256f64.p0v256f64(<256 x double>* %P, i32 16, <256 x i1> %M, <256 x double> undef)
+  %r = call <256 x double> @llvm.masked.load.v256f64.p0(ptr %P, i32 16, <256 x i1> %M, <256 x double> undef)
   ret <256 x double> %r
 }
 
 ; Function Attrs: nounwind
-define fastcc <256 x double> @vec_load_v256f64(<256 x double>* %P) {
+define fastcc <256 x double> @vec_load_v256f64(ptr %P) {
 ; CHECK-LABEL: vec_load_v256f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    vld %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = load <256 x double>, <256 x double>* %P, align 4
+  %r = load <256 x double>, ptr %P, align 4
   ret <256 x double> %r
 }
 
 ; Function Attrs: nounwind
-define fastcc <256 x double> @vec_mload_pt_v256f64(<256 x double>* %P, <256 x double> %PT, <256 x i1> %M) {
+define fastcc <256 x double> @vec_mload_pt_v256f64(ptr %P, <256 x double> %PT, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mload_pt_v256f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -52,15 +52,15 @@ define fastcc <256 x double> @vec_mload_pt_v256f64(<256 x double>* %P, <256 x do
 ; CHECK-NEXT:    vgt %v1, %v1, 0, 0, %vm1
 ; CHECK-NEXT:    vmrg %v0, %v0, %v1, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x double> @llvm.masked.load.v256f64.p0v256f64(<256 x double>* %P, i32 16, <256 x i1> %M, <256 x double> %PT)
+  %r = call <256 x double> @llvm.masked.load.v256f64.p0(ptr %P, i32 16, <256 x i1> %M, <256 x double> %PT)
   ret <256 x double> %r
 }
 
 
-declare <256 x float> @llvm.masked.load.v256f32.p0v256f32(<256 x float>* %0, i32 immarg %1, <256 x i1> %2, <256 x float> %3) #0
+declare <256 x float> @llvm.masked.load.v256f32.p0(ptr %0, i32 immarg %1, <256 x i1> %2, <256 x float> %3) #0
 
 ; Function Attrs: nounwind
-define fastcc <256 x float> @vec_mload_v256f32(<256 x float>* %P, <256 x i1> %M) {
+define fastcc <256 x float> @vec_mload_v256f32(ptr %P, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mload_v256f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -70,12 +70,12 @@ define fastcc <256 x float> @vec_mload_v256f32(<256 x float>* %P, <256 x i1> %M)
 ; CHECK-NEXT:    vaddu.l %v0, %s0, %v0, %vm1
 ; CHECK-NEXT:    vgtu %v0, %v0, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x float> @llvm.masked.load.v256f32.p0v256f32(<256 x float>* %P, i32 16, <256 x i1> %M, <256 x float> undef)
+  %r = call <256 x float> @llvm.masked.load.v256f32.p0(ptr %P, i32 16, <256 x i1> %M, <256 x float> undef)
   ret <256 x float> %r
 }
 
 ; Function Attrs: nounwind
-define fastcc <256 x float> @vec_mload_pt_v256f32(<256 x float>* %P, <256 x float> %PT, <256 x i1> %M) {
+define fastcc <256 x float> @vec_mload_pt_v256f32(ptr %P, <256 x float> %PT, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mload_pt_v256f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -86,15 +86,15 @@ define fastcc <256 x float> @vec_mload_pt_v256f32(<256 x float>* %P, <256 x floa
 ; CHECK-NEXT:    vgtu %v1, %v1, 0, 0, %vm1
 ; CHECK-NEXT:    vmrg %v0, %v0, %v1, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x float> @llvm.masked.load.v256f32.p0v256f32(<256 x float>* %P, i32 16, <256 x i1> %M, <256 x float> %PT)
+  %r = call <256 x float> @llvm.masked.load.v256f32.p0(ptr %P, i32 16, <256 x i1> %M, <256 x float> %PT)
   ret <256 x float> %r
 }
 
 
-declare <256 x i32> @llvm.masked.load.v256i32.p0v256i32(<256 x i32>* %0, i32 immarg %1, <256 x i1> %2, <256 x i32> %3) #0
+declare <256 x i32> @llvm.masked.load.v256i32.p0(ptr %0, i32 immarg %1, <256 x i1> %2, <256 x i32> %3) #0
 
 ; Function Attrs: nounwind
-define fastcc <256 x i32> @vec_mload_v256i32(<256 x i32>* %P, <256 x i1> %M) {
+define fastcc <256 x i32> @vec_mload_v256i32(ptr %P, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mload_v256i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -104,12 +104,12 @@ define fastcc <256 x i32> @vec_mload_v256i32(<256 x i32>* %P, <256 x i1> %M) {
 ; CHECK-NEXT:    vaddu.l %v0, %s0, %v0, %vm1
 ; CHECK-NEXT:    vgtl.zx %v0, %v0, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x i32> @llvm.masked.load.v256i32.p0v256i32(<256 x i32>* %P, i32 16, <256 x i1> %M, <256 x i32> undef)
+  %r = call <256 x i32> @llvm.masked.load.v256i32.p0(ptr %P, i32 16, <256 x i1> %M, <256 x i32> undef)
   ret <256 x i32> %r
 }
 
 ; Function Attrs: nounwind
-define fastcc <256 x i32> @vec_mload_pt_v256i32(<256 x i32>* %P, <256 x i32> %PT, <256 x i1> %M) {
+define fastcc <256 x i32> @vec_mload_pt_v256i32(ptr %P, <256 x i32> %PT, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mload_pt_v256i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
@@ -120,7 +120,7 @@ define fastcc <256 x i32> @vec_mload_pt_v256i32(<256 x i32>* %P, <256 x i32> %PT
 ; CHECK-NEXT:    vgtl.zx %v1, %v1, 0, 0, %vm1
 ; CHECK-NEXT:    vmrg %v0, %v0, %v1, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x i32> @llvm.masked.load.v256i32.p0v256i32(<256 x i32>* %P, i32 16, <256 x i1> %M, <256 x i32> %PT)
+  %r = call <256 x i32> @llvm.masked.load.v256i32.p0(ptr %P, i32 16, <256 x i1> %M, <256 x i32> %PT)
   ret <256 x i32> %r
 }
 

diff --git a/llvm/test/CodeGen/VE/Vector/vec_scatter.ll b/llvm/test/CodeGen/VE/Vector/vec_scatter.ll
index 4fe19dbb3ff48..93e0c57b09058 100644
--- a/llvm/test/CodeGen/VE/Vector/vec_scatter.ll
+++ b/llvm/test/CodeGen/VE/Vector/vec_scatter.ll
@@ -1,59 +1,59 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=ve-unknown-unknown -mattr=+vpu | FileCheck %s
 
-declare void @llvm.masked.scatter.v256i64.v256p0i64(<256 x i64>, <256 x i64*>, i32 immarg, <256 x i1>) #0
+declare void @llvm.masked.scatter.v256i64.v256p0(<256 x i64>, <256 x ptr>, i32 immarg, <256 x i1>) #0
 
 ; Function Attrs: nounwind
-define fastcc void @vec_mscatter_v256i64(<256 x i64> %V, <256 x i64*> %P, <256 x i1> %M) {
+define fastcc void @vec_mscatter_v256i64(<256 x i64> %V, <256 x ptr> %P, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mscatter_v256i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vsc %v0, %v1, 0, 0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.masked.scatter.v256i64.v256p0i64(<256 x i64> %V, <256 x i64*> %P, i32 4, <256 x i1> %M)
+  call void @llvm.masked.scatter.v256i64.v256p0(<256 x i64> %V, <256 x ptr> %P, i32 4, <256 x i1> %M)
   ret void
 }
 
-declare void @llvm.masked.scatter.v256f64.v256p0f64(<256 x double>, <256 x double*>, i32 immarg, <256 x i1>) #0
+declare void @llvm.masked.scatter.v256f64.v256p0(<256 x double>, <256 x ptr>, i32 immarg, <256 x i1>) #0
 
 ; Function Attrs: nounwind
-define fastcc void @vec_mscatter_v256f64(<256 x double> %V, <256 x double*> %P, <256 x i1> %M) {
+define fastcc void @vec_mscatter_v256f64(<256 x double> %V, <256 x ptr> %P, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mscatter_v256f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vsc %v0, %v1, 0, 0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.masked.scatter.v256f64.v256p0f64(<256 x double> %V, <256 x double*> %P, i32 4, <256 x i1> %M)
+  call void @llvm.masked.scatter.v256f64.v256p0(<256 x double> %V, <256 x ptr> %P, i32 4, <256 x i1> %M)
   ret void
 }
 
-declare void @llvm.masked.scatter.v256f32.v256p0f32(<256 x float>, <256 x float*>, i32 immarg, <256 x i1>) #0
+declare void @llvm.masked.scatter.v256f32.v256p0(<256 x float>, <256 x ptr>, i32 immarg, <256 x i1>) #0
 
 ; Function Attrs: nounwind
-define fastcc void @vec_mscatter_v256f32(<256 x float> %V, <256 x float*> %P, <256 x i1> %M) {
+define fastcc void @vec_mscatter_v256f32(<256 x float> %V, <256 x ptr> %P, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mscatter_v256f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vscu %v0, %v1, 0, 0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.masked.scatter.v256f32.v256p0f32(<256 x float> %V, <256 x float*> %P, i32 4, <256 x i1> %M)
+  call void @llvm.masked.scatter.v256f32.v256p0(<256 x float> %V, <256 x ptr> %P, i32 4, <256 x i1> %M)
   ret void
 }
 
-declare void @llvm.masked.scatter.v256i32.v256p0i32(<256 x i32>, <256 x i32*>, i32 immarg, <256 x i1>) #0
+declare void @llvm.masked.scatter.v256i32.v256p0(<256 x i32>, <256 x ptr>, i32 immarg, <256 x i1>) #0
 
 ; Function Attrs: nounwind
-define fastcc void @vec_mscatter_v256i32(<256 x i32> %V, <256 x i32*> %P, <256 x i1> %M) {
+define fastcc void @vec_mscatter_v256i32(<256 x i32> %V, <256 x ptr> %P, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mscatter_v256i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s0, 256
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vscl %v0, %v1, 0, 0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.masked.scatter.v256i32.v256p0i32(<256 x i32> %V, <256 x i32*> %P, i32 4, <256 x i1> %M)
+  call void @llvm.masked.scatter.v256i32.v256p0(<256 x i32> %V, <256 x ptr> %P, i32 4, <256 x i1> %M)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/VE/Vector/vec_store.ll b/llvm/test/CodeGen/VE/Vector/vec_store.ll
index a80d1a12d21be..68980e8e62fef 100644
--- a/llvm/test/CodeGen/VE/Vector/vec_store.ll
+++ b/llvm/test/CodeGen/VE/Vector/vec_store.ll
@@ -1,43 +1,43 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=ve-unknown-unknown -mattr=+vpu | FileCheck %s
 
-declare void @llvm.masked.store.v256f64.p0v256f64(<256 x double>, <256 x double>*, i32 immarg, <256 x i1>)
+declare void @llvm.masked.store.v256f64.p0(<256 x double>, ptr, i32 immarg, <256 x i1>)
 
-define fastcc void @vec_mstore_v256f64(<256 x double>* %P, <256 x double> %V, <256 x i1> %M) {
+define fastcc void @vec_mstore_v256f64(ptr %P, <256 x double> %V, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mstore_v256f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    vst %v0, 8, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.masked.store.v256f64.p0v256f64(<256 x double> %V, <256 x double>* %P, i32 16, <256 x i1> %M)
+  call void @llvm.masked.store.v256f64.p0(<256 x double> %V, ptr %P, i32 16, <256 x i1> %M)
   ret void
 }
 
 
-declare void @llvm.masked.store.v256f32.p0v256f32(<256 x float>, <256 x float>*, i32 immarg, <256 x i1>)
+declare void @llvm.masked.store.v256f32.p0(<256 x float>, ptr, i32 immarg, <256 x i1>)
 
-define fastcc void @vec_mstore_v256f32(<256 x float>* %P, <256 x float> %V, <256 x i1> %M) {
+define fastcc void @vec_mstore_v256f32(ptr %P, <256 x float> %V, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mstore_v256f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    vstu %v0, 4, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.masked.store.v256f32.p0v256f32(<256 x float> %V, <256 x float>* %P, i32 16, <256 x i1> %M)
+  call void @llvm.masked.store.v256f32.p0(<256 x float> %V, ptr %P, i32 16, <256 x i1> %M)
   ret void
 }
 
 
-declare void @llvm.masked.store.v256i32.p0v256i32(<256 x i32>, <256 x i32>*, i32 immarg, <256 x i1>)
+declare void @llvm.masked.store.v256i32.p0(<256 x i32>, ptr, i32 immarg, <256 x i1>)
 
-define fastcc void @vec_mstore_v256i32(<256 x i32>* %P, <256 x i32> %V, <256 x i1> %M) {
+define fastcc void @vec_mstore_v256i32(ptr %P, <256 x i32> %V, <256 x i1> %M) {
 ; CHECK-LABEL: vec_mstore_v256i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lea %s1, 256
 ; CHECK-NEXT:    lvl %s1
 ; CHECK-NEXT:    vstl %v0, 4, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.masked.store.v256i32.p0v256i32(<256 x i32> %V, <256 x i32>* %P, i32 16, <256 x i1> %M)
+  call void @llvm.masked.store.v256i32.p0(<256 x i32> %V, ptr %P, i32 16, <256 x i1> %M)
   ret void
 }

diff --git a/llvm/test/CodeGen/VE/Vector/vp_gather.ll b/llvm/test/CodeGen/VE/Vector/vp_gather.ll
index 39caf42f9da8a..ef549c5860616 100644
--- a/llvm/test/CodeGen/VE/Vector/vp_gather.ll
+++ b/llvm/test/CodeGen/VE/Vector/vp_gather.ll
@@ -1,58 +1,58 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=ve-unknown-unknown -mattr=+vpu | FileCheck %s
 
-declare <256 x i64> @llvm.vp.gather.v256i64.v256p0i64(<256 x i64*>, <256 x i1>, i32)
+declare <256 x i64> @llvm.vp.gather.v256i64.v256p0(<256 x ptr>, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define fastcc <256 x i64> @vp_gather_v256i64(<256 x i64*> %P, <256 x i1> %M, i32 %avl) {
+define fastcc <256 x i64> @vp_gather_v256i64(<256 x ptr> %P, <256 x i1> %M, i32 %avl) {
 ; CHECK-LABEL: vp_gather_v256i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s0, %s0, (32)0
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vgt %v0, %v0, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x i64> @llvm.vp.gather.v256i64.v256p0i64(<256 x i64*> %P, <256 x i1> %M, i32 %avl)
+  %r = call <256 x i64> @llvm.vp.gather.v256i64.v256p0(<256 x ptr> %P, <256 x i1> %M, i32 %avl)
   ret <256 x i64> %r
 }
 
-declare <256 x double> @llvm.vp.gather.v256f64.v256p0f64(<256 x double*>, <256 x i1>, i32)
+declare <256 x double> @llvm.vp.gather.v256f64.v256p0(<256 x ptr>, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define fastcc <256 x double> @vp_gather_v256f64(<256 x double*> %P, <256 x i1> %M, i32 %avl) {
+define fastcc <256 x double> @vp_gather_v256f64(<256 x ptr> %P, <256 x i1> %M, i32 %avl) {
 ; CHECK-LABEL: vp_gather_v256f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s0, %s0, (32)0
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vgt %v0, %v0, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x double> @llvm.vp.gather.v256f64.v256p0f64(<256 x double*> %P, <256 x i1> %M, i32 %avl)
+  %r = call <256 x double> @llvm.vp.gather.v256f64.v256p0(<256 x ptr> %P, <256 x i1> %M, i32 %avl)
   ret <256 x double> %r
 }
 
-declare <256 x float> @llvm.vp.gather.v256f32.v256p0f32(<256 x float*>, <256 x i1>, i32)
+declare <256 x float> @llvm.vp.gather.v256f32.v256p0(<256 x ptr>, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define fastcc <256 x float> @vp_gather_v256f32(<256 x float*> %P, <256 x i1> %M, i32 %avl) {
+define fastcc <256 x float> @vp_gather_v256f32(<256 x ptr> %P, <256 x i1> %M, i32 %avl) {
 ; CHECK-LABEL: vp_gather_v256f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s0, %s0, (32)0
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vgtu %v0, %v0, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x float> @llvm.vp.gather.v256f32.v256p0f32(<256 x float*> %P, <256 x i1> %M, i32 %avl)
+  %r = call <256 x float> @llvm.vp.gather.v256f32.v256p0(<256 x ptr> %P, <256 x i1> %M, i32 %avl)
   ret <256 x float> %r
 }
 
-declare <256 x i32> @llvm.vp.gather.v256i32.v256p0i32(<256 x i32*>, <256 x i1>, i32)
+declare <256 x i32> @llvm.vp.gather.v256i32.v256p0(<256 x ptr>, <256 x i1>, i32)
 
 ; Function Attrs: nounwind
-define fastcc <256 x i32> @vp_gather_v256i32(<256 x i32*> %P, <256 x i1> %M, i32 %avl) {
+define fastcc <256 x i32> @vp_gather_v256i32(<256 x ptr> %P, <256 x i1> %M, i32 %avl) {
 ; CHECK-LABEL: vp_gather_v256i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s0, %s0, (32)0
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vgtl.zx %v0, %v0, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x i32> @llvm.vp.gather.v256i32.v256p0i32(<256 x i32*> %P, <256 x i1> %M, i32 %avl)
+  %r = call <256 x i32> @llvm.vp.gather.v256i32.v256p0(<256 x ptr> %P, <256 x i1> %M, i32 %avl)
   ret <256 x i32> %r
 }

diff --git a/llvm/test/CodeGen/VE/Vector/vp_scatter.ll b/llvm/test/CodeGen/VE/Vector/vp_scatter.ll
index d748f8f270461..fd4222013e27d 100644
--- a/llvm/test/CodeGen/VE/Vector/vp_scatter.ll
+++ b/llvm/test/CodeGen/VE/Vector/vp_scatter.ll
@@ -1,59 +1,59 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=ve-unknown-unknown -mattr=+vpu | FileCheck %s
 
-declare void @llvm.vp.scatter.v256i64.v256p0i64(<256 x i64>, <256 x i64*>, <256 x i1>, i32 %avl)
+declare void @llvm.vp.scatter.v256i64.v256p0(<256 x i64>, <256 x ptr>, <256 x i1>, i32 %avl)
 
 ; Function Attrs: nounwind
-define fastcc void @vp_mscatter_v256i64(<256 x i64> %V, <256 x i64*> %P, <256 x i1> %M, i32 %avl) {
+define fastcc void @vp_mscatter_v256i64(<256 x i64> %V, <256 x ptr> %P, <256 x i1> %M, i32 %avl) {
 ; CHECK-LABEL: vp_mscatter_v256i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s0, %s0, (32)0
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vsc %v0, %v1, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.vp.scatter.v256i64.v256p0i64(<256 x i64> %V, <256 x i64*> %P, <256 x i1> %M, i32 %avl)
+  call void @llvm.vp.scatter.v256i64.v256p0(<256 x i64> %V, <256 x ptr> %P, <256 x i1> %M, i32 %avl)
   ret void
 }
 
-declare void @llvm.vp.scatter.v256f64.v256p0f64(<256 x double>, <256 x double*>, <256 x i1>, i32 %avl)
+declare void @llvm.vp.scatter.v256f64.v256p0(<256 x double>, <256 x ptr>, <256 x i1>, i32 %avl)
 
 ; Function Attrs: nounwind
-define fastcc void @vp_mscatter_v256f64(<256 x double> %V, <256 x double*> %P, <256 x i1> %M, i32 %avl) {
+define fastcc void @vp_mscatter_v256f64(<256 x double> %V, <256 x ptr> %P, <256 x i1> %M, i32 %avl) {
 ; CHECK-LABEL: vp_mscatter_v256f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s0, %s0, (32)0
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vsc %v0, %v1, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.vp.scatter.v256f64.v256p0f64(<256 x double> %V, <256 x double*> %P, <256 x i1> %M, i32 %avl)
+  call void @llvm.vp.scatter.v256f64.v256p0(<256 x double> %V, <256 x ptr> %P, <256 x i1> %M, i32 %avl)
   ret void
 }
 
-declare void @llvm.vp.scatter.v256f32.v256p0f32(<256 x float>, <256 x float*>, <256 x i1>, i32 %avl)
+declare void @llvm.vp.scatter.v256f32.v256p0(<256 x float>, <256 x ptr>, <256 x i1>, i32 %avl)
 
 ; Function Attrs: nounwind
-define fastcc void @vp_mscatter_v256f32(<256 x float> %V, <256 x float*> %P, <256 x i1> %M, i32 %avl) {
+define fastcc void @vp_mscatter_v256f32(<256 x float> %V, <256 x ptr> %P, <256 x i1> %M, i32 %avl) {
 ; CHECK-LABEL: vp_mscatter_v256f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s0, %s0, (32)0
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vscu %v0, %v1, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.vp.scatter.v256f32.v256p0f32(<256 x float> %V, <256 x float*> %P, <256 x i1> %M, i32 %avl)
+  call void @llvm.vp.scatter.v256f32.v256p0(<256 x float> %V, <256 x ptr> %P, <256 x i1> %M, i32 %avl)
   ret void
 }
 
-declare void @llvm.vp.scatter.v256i32.v256p0i32(<256 x i32>, <256 x i32*>, <256 x i1>, i32 %avl)
+declare void @llvm.vp.scatter.v256i32.v256p0(<256 x i32>, <256 x ptr>, <256 x i1>, i32 %avl)
 
 ; Function Attrs: nounwind
-define fastcc void @vp_mscatter_v256i32(<256 x i32> %V, <256 x i32*> %P, <256 x i1> %M, i32 %avl) {
+define fastcc void @vp_mscatter_v256i32(<256 x i32> %V, <256 x ptr> %P, <256 x i1> %M, i32 %avl) {
 ; CHECK-LABEL: vp_mscatter_v256i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s0, %s0, (32)0
 ; CHECK-NEXT:    lvl %s0
 ; CHECK-NEXT:    vscl %v0, %v1, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.vp.scatter.v256i32.v256p0i32(<256 x i32> %V, <256 x i32*> %P, <256 x i1> %M, i32 %avl)
+  call void @llvm.vp.scatter.v256i32.v256p0(<256 x i32> %V, <256 x ptr> %P, <256 x i1> %M, i32 %avl)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/VE/Vector/vp_strided_load.ll b/llvm/test/CodeGen/VE/Vector/vp_strided_load.ll
index 48b35cee34906..0304bc287e66f 100644
--- a/llvm/test/CodeGen/VE/Vector/vp_strided_load.ll
+++ b/llvm/test/CodeGen/VE/Vector/vp_strided_load.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=ve-unknown-unknown -mattr=+vpu | FileCheck %s
 
-declare <256 x float> @llvm.experimental.vp.strided.load.v256f32.p0f32.i64(float* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+declare <256 x float> @llvm.experimental.vp.strided.load.v256f32.p0.i64(ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
 
-define fastcc <256 x float> @vp_strided_load_v256f32_rrm(float* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
+define fastcc <256 x float> @vp_strided_load_v256f32_rrm(ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
 ; CHECK-LABEL: vp_strided_load_v256f32_rrm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
@@ -13,11 +13,11 @@ define fastcc <256 x float> @vp_strided_load_v256f32_rrm(float* %ptr, i64 %strid
 ; CHECK-NEXT:    vaddu.l %v0, %s0, %v0, %vm1
 ; CHECK-NEXT:    vgtu %v0, %v0, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x float> @llvm.experimental.vp.strided.load.v256f32.p0f32.i64(float* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+  %r = call <256 x float> @llvm.experimental.vp.strided.load.v256f32.p0.i64(ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
   ret <256 x float> %r
 }
 
-define fastcc <256 x float> @vp_strided_load_v256f32_rr(float* %ptr, i64 %stride, i32 %evl) {
+define fastcc <256 x float> @vp_strided_load_v256f32_rr(ptr %ptr, i64 %stride, i32 %evl) {
 ; CHECK-LABEL: vp_strided_load_v256f32_rr:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
@@ -26,11 +26,11 @@ define fastcc <256 x float> @vp_strided_load_v256f32_rr(float* %ptr, i64 %stride
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  %r = call <256 x float> @llvm.experimental.vp.strided.load.v256f32.p0f32.i64(float* %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
+  %r = call <256 x float> @llvm.experimental.vp.strided.load.v256f32.p0.i64(ptr %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
   ret <256 x float> %r
 }
 
-define fastcc <256 x float> @vp_strided_load_v256f32_ri(float* %ptr, i32 %evl) {
+define fastcc <256 x float> @vp_strided_load_v256f32_ri(ptr %ptr, i32 %evl) {
 ; CHECK-LABEL: vp_strided_load_v256f32_ri:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s1, %s1, (32)0
@@ -39,13 +39,13 @@ define fastcc <256 x float> @vp_strided_load_v256f32_ri(float* %ptr, i32 %evl) {
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  %r = call <256 x float> @llvm.experimental.vp.strided.load.v256f32.p0f32.i64(float* %ptr, i64 24, <256 x i1> %allones, i32 %evl)
+  %r = call <256 x float> @llvm.experimental.vp.strided.load.v256f32.p0.i64(ptr %ptr, i64 24, <256 x i1> %allones, i32 %evl)
   ret <256 x float> %r
 }
 
-declare <256 x i32> @llvm.experimental.vp.strided.load.v256i32.p0i32.i64(i32* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+declare <256 x i32> @llvm.experimental.vp.strided.load.v256i32.p0.i64(ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
 
-define fastcc <256 x i32> @vp_strided_load_v256i32_rrm(i32* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
+define fastcc <256 x i32> @vp_strided_load_v256i32_rrm(ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
 ; CHECK-LABEL: vp_strided_load_v256i32_rrm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
@@ -55,11 +55,11 @@ define fastcc <256 x i32> @vp_strided_load_v256i32_rrm(i32* %ptr, i64 %stride, <
 ; CHECK-NEXT:    vaddu.l %v0, %s0, %v0, %vm1
 ; CHECK-NEXT:    vgtl.zx %v0, %v0, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x i32> @llvm.experimental.vp.strided.load.v256i32.p0i32.i64(i32* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+  %r = call <256 x i32> @llvm.experimental.vp.strided.load.v256i32.p0.i64(ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
   ret <256 x i32> %r
 }
 
-define fastcc <256 x i32> @vp_strided_load_v256i32_rr(i32* %ptr, i64 %stride, i32 %evl) {
+define fastcc <256 x i32> @vp_strided_load_v256i32_rr(ptr %ptr, i64 %stride, i32 %evl) {
 ; CHECK-LABEL: vp_strided_load_v256i32_rr:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
@@ -68,11 +68,11 @@ define fastcc <256 x i32> @vp_strided_load_v256i32_rr(i32* %ptr, i64 %stride, i3
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  %r = call <256 x i32> @llvm.experimental.vp.strided.load.v256i32.p0i32.i64(i32* %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
+  %r = call <256 x i32> @llvm.experimental.vp.strided.load.v256i32.p0.i64(ptr %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
   ret <256 x i32> %r
 }
 
-define fastcc <256 x i32> @vp_strided_load_v256i32_ri(i32* %ptr, i32 %evl) {
+define fastcc <256 x i32> @vp_strided_load_v256i32_ri(ptr %ptr, i32 %evl) {
 ; CHECK-LABEL: vp_strided_load_v256i32_ri:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s1, %s1, (32)0
@@ -81,13 +81,13 @@ define fastcc <256 x i32> @vp_strided_load_v256i32_ri(i32* %ptr, i32 %evl) {
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  %r = call <256 x i32> @llvm.experimental.vp.strided.load.v256i32.p0i32.i64(i32* %ptr, i64 24, <256 x i1> %allones, i32 %evl)
+  %r = call <256 x i32> @llvm.experimental.vp.strided.load.v256i32.p0.i64(ptr %ptr, i64 24, <256 x i1> %allones, i32 %evl)
   ret <256 x i32> %r
 }
 
-declare <256 x double> @llvm.experimental.vp.strided.load.v256f64.p0f64.i64(double* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+declare <256 x double> @llvm.experimental.vp.strided.load.v256f64.p0.i64(ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
 
-define fastcc <256 x double> @vp_strided_load_v256f64_rrm(double* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
+define fastcc <256 x double> @vp_strided_load_v256f64_rrm(ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
 ; CHECK-LABEL: vp_strided_load_v256f64_rrm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
@@ -97,11 +97,11 @@ define fastcc <256 x double> @vp_strided_load_v256f64_rrm(double* %ptr, i64 %str
 ; CHECK-NEXT:    vaddu.l %v0, %s0, %v0, %vm1
 ; CHECK-NEXT:    vgt %v0, %v0, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x double> @llvm.experimental.vp.strided.load.v256f64.p0f64.i64(double* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+  %r = call <256 x double> @llvm.experimental.vp.strided.load.v256f64.p0.i64(ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
   ret <256 x double> %r
 }
 
-define fastcc <256 x double> @vp_strided_load_v256f64_rr(double* %ptr, i64 %stride, i32 %evl) {
+define fastcc <256 x double> @vp_strided_load_v256f64_rr(ptr %ptr, i64 %stride, i32 %evl) {
 ; CHECK-LABEL: vp_strided_load_v256f64_rr:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
@@ -110,11 +110,11 @@ define fastcc <256 x double> @vp_strided_load_v256f64_rr(double* %ptr, i64 %stri
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  %r = call <256 x double> @llvm.experimental.vp.strided.load.v256f64.p0f64.i64(double* %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
+  %r = call <256 x double> @llvm.experimental.vp.strided.load.v256f64.p0.i64(ptr %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
   ret <256 x double> %r
 }
 
-define fastcc <256 x double> @vp_strided_load_v256f64_ri(double* %ptr, i32 %evl) {
+define fastcc <256 x double> @vp_strided_load_v256f64_ri(ptr %ptr, i32 %evl) {
 ; CHECK-LABEL: vp_strided_load_v256f64_ri:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s1, %s1, (32)0
@@ -123,13 +123,13 @@ define fastcc <256 x double> @vp_strided_load_v256f64_ri(double* %ptr, i32 %evl)
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  %r = call <256 x double> @llvm.experimental.vp.strided.load.v256f64.p0f64.i64(double* %ptr, i64 24, <256 x i1> %allones, i32 %evl)
+  %r = call <256 x double> @llvm.experimental.vp.strided.load.v256f64.p0.i64(ptr %ptr, i64 24, <256 x i1> %allones, i32 %evl)
   ret <256 x double> %r
 }
 
-declare <256 x i64> @llvm.experimental.vp.strided.load.v256i64.p0i64.i64(i64* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+declare <256 x i64> @llvm.experimental.vp.strided.load.v256i64.p0.i64(ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
 
-define fastcc <256 x i64> @vp_strided_load_v256i64_rrm(i64* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
+define fastcc <256 x i64> @vp_strided_load_v256i64_rrm(ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
 ; CHECK-LABEL: vp_strided_load_v256i64_rrm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
@@ -139,11 +139,11 @@ define fastcc <256 x i64> @vp_strided_load_v256i64_rrm(i64* %ptr, i64 %stride, <
 ; CHECK-NEXT:    vaddu.l %v0, %s0, %v0, %vm1
 ; CHECK-NEXT:    vgt %v0, %v0, 0, 0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  %r = call <256 x i64> @llvm.experimental.vp.strided.load.v256i64.p0i64.i64(i64* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+  %r = call <256 x i64> @llvm.experimental.vp.strided.load.v256i64.p0.i64(ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
   ret <256 x i64> %r
 }
 
-define fastcc <256 x i64> @vp_strided_load_v256i64_rr(i64* %ptr, i64 %stride, i32 %evl) {
+define fastcc <256 x i64> @vp_strided_load_v256i64_rr(ptr %ptr, i64 %stride, i32 %evl) {
 ; CHECK-LABEL: vp_strided_load_v256i64_rr:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
@@ -152,11 +152,11 @@ define fastcc <256 x i64> @vp_strided_load_v256i64_rr(i64* %ptr, i64 %stride, i3
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  %r = call <256 x i64> @llvm.experimental.vp.strided.load.v256i64.p0i64.i64(i64* %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
+  %r = call <256 x i64> @llvm.experimental.vp.strided.load.v256i64.p0.i64(ptr %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
   ret <256 x i64> %r
 }
 
-define fastcc <256 x i64> @vp_strided_load_v256i64_ri(i64* %ptr, i32 %evl) {
+define fastcc <256 x i64> @vp_strided_load_v256i64_ri(ptr %ptr, i32 %evl) {
 ; CHECK-LABEL: vp_strided_load_v256i64_ri:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s1, %s1, (32)0
@@ -165,6 +165,6 @@ define fastcc <256 x i64> @vp_strided_load_v256i64_ri(i64* %ptr, i32 %evl) {
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  %r = call <256 x i64> @llvm.experimental.vp.strided.load.v256i64.p0i64.i64(i64* %ptr, i64 24, <256 x i1> %allones, i32 %evl)
+  %r = call <256 x i64> @llvm.experimental.vp.strided.load.v256i64.p0.i64(ptr %ptr, i64 24, <256 x i1> %allones, i32 %evl)
   ret <256 x i64> %r
 }

diff  --git a/llvm/test/CodeGen/VE/Vector/vp_strided_store.ll b/llvm/test/CodeGen/VE/Vector/vp_strided_store.ll
index 80ea6850e6732..2f40c6665b087 100644
--- a/llvm/test/CodeGen/VE/Vector/vp_strided_store.ll
+++ b/llvm/test/CodeGen/VE/Vector/vp_strided_store.ll
@@ -1,20 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=ve-unknown-unknown -mattr=+vpu | FileCheck %s
 
-declare void @llvm.experimental.vp.strided.store.v256f32.p0f32.i64(<256 x float> %val, float* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+declare void @llvm.experimental.vp.strided.store.v256f32.p0.i64(<256 x float> %val, ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
 
-define fastcc void @vp_strided_store_v256f32_rrm(<256 x float> %val, float* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
+define fastcc void @vp_strided_store_v256f32_rrm(<256 x float> %val, ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
 ; CHECK-LABEL: vp_strided_store_v256f32_rrm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
 ; CHECK-NEXT:    lvl %s2
 ; CHECK-NEXT:    vstu %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.experimental.vp.strided.store.v256f32.p0f32.i64(<256 x float> %val, float* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v256f32.p0.i64(<256 x float> %val, ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
   ret void
 }
 
-define fastcc void @vp_strided_store_v256f32_rr(<256 x float> %val, float* %ptr, i64 %stride, i32 %evl) {
+define fastcc void @vp_strided_store_v256f32_rr(<256 x float> %val, ptr %ptr, i64 %stride, i32 %evl) {
 ; CHECK-LABEL: vp_strided_store_v256f32_rr:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
@@ -23,11 +23,11 @@ define fastcc void @vp_strided_store_v256f32_rr(<256 x float> %val, float* %ptr,
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.v256f32.p0f32.i64(<256 x float> %val, float* %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v256f32.p0.i64(<256 x float> %val, ptr %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
   ret void
 }
 
-define fastcc void @vp_strided_store_v256f32_ri(<256 x float> %val, float* %ptr, i32 %evl) {
+define fastcc void @vp_strided_store_v256f32_ri(<256 x float> %val, ptr %ptr, i32 %evl) {
 ; CHECK-LABEL: vp_strided_store_v256f32_ri:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s1, %s1, (32)0
@@ -36,24 +36,24 @@ define fastcc void @vp_strided_store_v256f32_ri(<256 x float> %val, float* %ptr,
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.v256f32.p0f32.i64(<256 x float> %val, float* %ptr, i64 24, <256 x i1> %allones, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v256f32.p0.i64(<256 x float> %val, ptr %ptr, i64 24, <256 x i1> %allones, i32 %evl)
   ret void
 }
 
-declare void @llvm.experimental.vp.strided.store.v256i32.p0i32.i64(<256 x i32> %val, i32* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+declare void @llvm.experimental.vp.strided.store.v256i32.p0.i64(<256 x i32> %val, ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
 
-define fastcc void @vp_strided_store_v256i32_rrm(<256 x i32> %val, i32* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
+define fastcc void @vp_strided_store_v256i32_rrm(<256 x i32> %val, ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
 ; CHECK-LABEL: vp_strided_store_v256i32_rrm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
 ; CHECK-NEXT:    lvl %s2
 ; CHECK-NEXT:    vstl %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.experimental.vp.strided.store.v256i32.p0i32.i64(<256 x i32> %val, i32* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v256i32.p0.i64(<256 x i32> %val, ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
   ret void
 }
 
-define fastcc void @vp_strided_store_v256i32_rr(<256 x i32> %val, i32* %ptr, i64 %stride, i32 %evl) {
+define fastcc void @vp_strided_store_v256i32_rr(<256 x i32> %val, ptr %ptr, i64 %stride, i32 %evl) {
 ; CHECK-LABEL: vp_strided_store_v256i32_rr:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
@@ -62,11 +62,11 @@ define fastcc void @vp_strided_store_v256i32_rr(<256 x i32> %val, i32* %ptr, i64
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.v256i32.p0i32.i64(<256 x i32> %val, i32* %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v256i32.p0.i64(<256 x i32> %val, ptr %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
   ret void
 }
 
-define fastcc void @vp_strided_store_v256i32_ri(<256 x i32> %val, i32* %ptr, i32 %evl) {
+define fastcc void @vp_strided_store_v256i32_ri(<256 x i32> %val, ptr %ptr, i32 %evl) {
 ; CHECK-LABEL: vp_strided_store_v256i32_ri:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s1, %s1, (32)0
@@ -75,24 +75,24 @@ define fastcc void @vp_strided_store_v256i32_ri(<256 x i32> %val, i32* %ptr, i32
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.v256i32.p0i32.i64(<256 x i32> %val, i32* %ptr, i64 24, <256 x i1> %allones, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v256i32.p0.i64(<256 x i32> %val, ptr %ptr, i64 24, <256 x i1> %allones, i32 %evl)
   ret void
 }
 
-declare void @llvm.experimental.vp.strided.store.v256f64.p0f64.i64(<256 x double> %val, double* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+declare void @llvm.experimental.vp.strided.store.v256f64.p0.i64(<256 x double> %val, ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
 
-define fastcc void @vp_strided_store_v256f64_rrm(<256 x double> %val, double* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
+define fastcc void @vp_strided_store_v256f64_rrm(<256 x double> %val, ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
 ; CHECK-LABEL: vp_strided_store_v256f64_rrm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
 ; CHECK-NEXT:    lvl %s2
 ; CHECK-NEXT:    vst %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.experimental.vp.strided.store.v256f64.p0f64.i64(<256 x double> %val, double* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v256f64.p0.i64(<256 x double> %val, ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
   ret void
 }
 
-define fastcc void @vp_strided_store_v256f64_rr(<256 x double> %val, double* %ptr, i64 %stride, i32 %evl) {
+define fastcc void @vp_strided_store_v256f64_rr(<256 x double> %val, ptr %ptr, i64 %stride, i32 %evl) {
 ; CHECK-LABEL: vp_strided_store_v256f64_rr:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
@@ -101,11 +101,11 @@ define fastcc void @vp_strided_store_v256f64_rr(<256 x double> %val, double* %pt
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.v256f64.p0f64.i64(<256 x double> %val, double* %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v256f64.p0.i64(<256 x double> %val, ptr %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
   ret void
 }
 
-define fastcc void @vp_strided_store_v256f64_ri(<256 x double> %val, double* %ptr, i32 %evl) {
+define fastcc void @vp_strided_store_v256f64_ri(<256 x double> %val, ptr %ptr, i32 %evl) {
 ; CHECK-LABEL: vp_strided_store_v256f64_ri:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s1, %s1, (32)0
@@ -114,24 +114,24 @@ define fastcc void @vp_strided_store_v256f64_ri(<256 x double> %val, double* %pt
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.v256f64.p0f64.i64(<256 x double> %val, double* %ptr, i64 24, <256 x i1> %allones, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v256f64.p0.i64(<256 x double> %val, ptr %ptr, i64 24, <256 x i1> %allones, i32 %evl)
   ret void
 }
 
-declare void @llvm.experimental.vp.strided.store.v256i64.p0i64.i64(<256 x i64> %val, i64* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+declare void @llvm.experimental.vp.strided.store.v256i64.p0.i64(<256 x i64> %val, ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
 
-define fastcc void @vp_strided_store_v256i64_rrm(<256 x i64> %val, i64* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
+define fastcc void @vp_strided_store_v256i64_rrm(<256 x i64> %val, ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl) {
 ; CHECK-LABEL: vp_strided_store_v256i64_rrm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
 ; CHECK-NEXT:    lvl %s2
 ; CHECK-NEXT:    vst %v0, %s1, %s0, %vm1
 ; CHECK-NEXT:    b.l.t (, %s10)
-  call void @llvm.experimental.vp.strided.store.v256i64.p0i64.i64(<256 x i64> %val, i64* %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v256i64.p0.i64(<256 x i64> %val, ptr %ptr, i64 %stride, <256 x i1> %mask, i32 %evl)
   ret void
 }
 
-define fastcc void @vp_strided_store_v256i64_rr(<256 x i64> %val, i64* %ptr, i64 %stride, i32 %evl) {
+define fastcc void @vp_strided_store_v256i64_rr(<256 x i64> %val, ptr %ptr, i64 %stride, i32 %evl) {
 ; CHECK-LABEL: vp_strided_store_v256i64_rr:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s2, %s2, (32)0
@@ -140,11 +140,11 @@ define fastcc void @vp_strided_store_v256i64_rr(<256 x i64> %val, i64* %ptr, i64
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.v256i64.p0i64.i64(<256 x i64> %val, i64* %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v256i64.p0.i64(<256 x i64> %val, ptr %ptr, i64 %stride, <256 x i1> %allones, i32 %evl)
   ret void
 }
 
-define fastcc void @vp_strided_store_v256i64_ri(<256 x i64> %val, i64* %ptr, i32 %evl) {
+define fastcc void @vp_strided_store_v256i64_ri(<256 x i64> %val, ptr %ptr, i32 %evl) {
 ; CHECK-LABEL: vp_strided_store_v256i64_ri:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    and %s1, %s1, (32)0
@@ -153,6 +153,6 @@ define fastcc void @vp_strided_store_v256i64_ri(<256 x i64> %val, i64* %ptr, i32
 ; CHECK-NEXT:    b.l.t (, %s10)
   %one = insertelement <256 x i1> undef, i1 1, i32 0
   %allones = shufflevector <256 x i1> %one, <256 x i1> undef, <256 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.v256i64.p0i64.i64(<256 x i64> %val, i64* %ptr, i64 24, <256 x i1> %allones, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v256i64.p0.i64(<256 x i64> %val, ptr %ptr, i64 24, <256 x i1> %allones, i32 %evl)
   ret void
 }
