[llvm] b5b663a - [Thumb2] Convert some tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits@lists.llvm.org
Mon Dec 19 04:04:45 PST 2022


Author: Nikita Popov
Date: 2022-12-19T13:04:20+01:00
New Revision: b5b663aac17415625340eb29c8010832bfc4c21c

URL: https://github.com/llvm/llvm-project/commit/b5b663aac17415625340eb29c8010832bfc4c21c
DIFF: https://github.com/llvm/llvm-project/commit/b5b663aac17415625340eb29c8010832bfc4c21c.diff

LOG: [Thumb2] Convert some tests to opaque pointers (NFC)
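
The conversion mechanically replaces typed pointer types (i8*, i32*,
%struct.foo*, and so on) with the single opaque ptr type, drops the
pointer-to-pointer bitcasts that become redundant, and renumbers the
remaining unnamed values where instructions disappear. A minimal sketch
of the before/after pattern (illustrative only; @example is a
placeholder, not a function from these tests):

; Before: typed pointers, with a bitcast to reconcile pointee types.
define i32 @example(i8* %p) {
  %q = bitcast i8* %p to i32*
  %v = load i32, i32* %q
  ret i32 %v
}

; After: everything is ptr, so the bitcast vanishes and the remaining
; unnamed values are renumbered.
define i32 @example(ptr %p) {
  %v = load i32, ptr %p
  ret i32 %v
}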

Added: 
    

Modified: 
    llvm/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll
    llvm/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll
    llvm/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll
    llvm/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
    llvm/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll
    llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerAssert.ll
    llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll
    llvm/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll
    llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll
    llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug2.ll
    llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll
    llvm/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
    llvm/test/CodeGen/Thumb2/2009-08-07-CoalescerBug.ll
    llvm/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll
    llvm/test/CodeGen/Thumb2/2009-08-08-ScavengerAssert.ll
    llvm/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll
    llvm/test/CodeGen/Thumb2/2009-08-21-PostRAKill4.ll
    llvm/test/CodeGen/Thumb2/2009-09-01-PostRAProlog.ll
    llvm/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll
    llvm/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll
    llvm/test/CodeGen/Thumb2/2009-11-13-STRDBug.ll
    llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
    llvm/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll
    llvm/test/CodeGen/Thumb2/2010-01-19-RemovePredicates.ll
    llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll
    llvm/test/CodeGen/Thumb2/2010-03-08-addi12-ccout.ll
    llvm/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
    llvm/test/CodeGen/Thumb2/2010-04-26-CopyRegCrash.ll
    llvm/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
    llvm/test/CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll
    llvm/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
    llvm/test/CodeGen/Thumb2/2010-08-10-VarSizedAllocaBug.ll
    llvm/test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll
    llvm/test/CodeGen/Thumb2/2011-04-21-FILoweringBug.ll
    llvm/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll
    llvm/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll
    llvm/test/CodeGen/Thumb2/2013-02-19-tail-call-register-hint.ll
    llvm/test/CodeGen/Thumb2/2013-03-02-vduplane-nonconstant-source-index.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/branch-targets.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/clear-maskedinsts.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/exitcount.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/extending-loads.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/inlineasm.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-guards.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/lsr-profitable-chain.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/nested.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-le-simple.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/remat-vctp.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/sibling-loops.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-basic.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-const.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-disabled-in-loloops.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-add-sat.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-fabs.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-round.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-sub-sat.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-narrow.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-pattern-fail.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-widen.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-unroll.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll
    llvm/test/CodeGen/Thumb2/active_lane_mask.ll
    llvm/test/CodeGen/Thumb2/aligned-constants.ll
    llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll
    llvm/test/CodeGen/Thumb2/aligned-spill.ll
    llvm/test/CodeGen/Thumb2/bfi.ll
    llvm/test/CodeGen/Thumb2/bicbfi.ll
    llvm/test/CodeGen/Thumb2/bti-indirect-branches.ll
    llvm/test/CodeGen/Thumb2/bti-outliner-1.ll
    llvm/test/CodeGen/Thumb2/bti-outliner-2.ll
    llvm/test/CodeGen/Thumb2/bti-outliner-cost-1.ll
    llvm/test/CodeGen/Thumb2/bti-outliner-cost-2.ll
    llvm/test/CodeGen/Thumb2/bug-subw.ll
    llvm/test/CodeGen/Thumb2/buildvector-crash.ll
    llvm/test/CodeGen/Thumb2/call-site-info-update.ll
    llvm/test/CodeGen/Thumb2/cmp-frame.ll
    llvm/test/CodeGen/Thumb2/constant-hoisting.ll
    llvm/test/CodeGen/Thumb2/constant-islands-cbz.ll
    llvm/test/CodeGen/Thumb2/constant-islands-new-island-padding.ll
    llvm/test/CodeGen/Thumb2/constant-islands.ll
    llvm/test/CodeGen/Thumb2/crash.ll
    llvm/test/CodeGen/Thumb2/cross-rc-coalescing-1.ll
    llvm/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
    llvm/test/CodeGen/Thumb2/emit-unwinding.ll
    llvm/test/CodeGen/Thumb2/fir.ll
    llvm/test/CodeGen/Thumb2/float-ops.ll
    llvm/test/CodeGen/Thumb2/frameless2.ll
    llvm/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll
    llvm/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll
    llvm/test/CodeGen/Thumb2/ifcvt-rescan-diamonds.ll
    llvm/test/CodeGen/Thumb2/inflate-regs.ll
    llvm/test/CodeGen/Thumb2/intrinsics-coprocessor.ll
    llvm/test/CodeGen/Thumb2/large-call.ll
    llvm/test/CodeGen/Thumb2/large-stack.ll
    llvm/test/CodeGen/Thumb2/ldr-str-imm12.ll
    llvm/test/CodeGen/Thumb2/lsll0.ll
    llvm/test/CodeGen/Thumb2/lsr-deficiency.ll
    llvm/test/CodeGen/Thumb2/m4-sched-regs.ll
    llvm/test/CodeGen/Thumb2/machine-licm.ll
    llvm/test/CodeGen/Thumb2/mve-basic.ll
    llvm/test/CodeGen/Thumb2/mve-be.ll
    llvm/test/CodeGen/Thumb2/mve-blockplacement.ll
    llvm/test/CodeGen/Thumb2/mve-extractstore.ll
    llvm/test/CodeGen/Thumb2/mve-fma-loops.ll
    llvm/test/CodeGen/Thumb2/mve-fp16convertloops.ll
    llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
    llvm/test/CodeGen/Thumb2/mve-gather-ind16-scaled.ll
    llvm/test/CodeGen/Thumb2/mve-gather-ind16-unscaled.ll
    llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll
    llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll
    llvm/test/CodeGen/Thumb2/mve-gather-ind8-unscaled.ll
    llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll
    llvm/test/CodeGen/Thumb2/mve-gather-scatter-ptr-address.ll
    llvm/test/CodeGen/Thumb2/mve-gather-tailpred.ll
    llvm/test/CodeGen/Thumb2/mve-gather-unused.ll
    llvm/test/CodeGen/Thumb2/mve-gatherscatter-mmo.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/idup.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/load-store.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/scatter-gather.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/vadc.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/vld24.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/vldr.ll
    llvm/test/CodeGen/Thumb2/mve-intrinsics/vshlc.ll
    llvm/test/CodeGen/Thumb2/mve-laneinterleaving-cost.ll
    llvm/test/CodeGen/Thumb2/mve-ldst-offset.ll
    llvm/test/CodeGen/Thumb2/mve-ldst-postinc.ll
    llvm/test/CodeGen/Thumb2/mve-ldst-preinc.ll
    llvm/test/CodeGen/Thumb2/mve-ldst-regimm.ll
    llvm/test/CodeGen/Thumb2/mve-loadstore.ll
    llvm/test/CodeGen/Thumb2/mve-masked-ldst-offset.ll
    llvm/test/CodeGen/Thumb2/mve-masked-ldst-postinc.ll
    llvm/test/CodeGen/Thumb2/mve-masked-ldst-preinc.ll
    llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
    llvm/test/CodeGen/Thumb2/mve-masked-load.ll
    llvm/test/CodeGen/Thumb2/mve-masked-store-mmo.ll
    llvm/test/CodeGen/Thumb2/mve-masked-store.ll
    llvm/test/CodeGen/Thumb2/mve-memtp-branch.ll
    llvm/test/CodeGen/Thumb2/mve-memtp-loop.ll
    llvm/test/CodeGen/Thumb2/mve-minmaxi.ll
    llvm/test/CodeGen/Thumb2/mve-multivec-spill.ll
    llvm/test/CodeGen/Thumb2/mve-nounrolledremainder.ll
    llvm/test/CodeGen/Thumb2/mve-pred-constfold.ll
    llvm/test/CodeGen/Thumb2/mve-pred-convert.ll
    llvm/test/CodeGen/Thumb2/mve-pred-loadstore.ll
    llvm/test/CodeGen/Thumb2/mve-pred-threshold.ll
    llvm/test/CodeGen/Thumb2/mve-qrintr.ll
    llvm/test/CodeGen/Thumb2/mve-satmul-loops.ll
    llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
    llvm/test/CodeGen/Thumb2/mve-scatter-ind16-scaled.ll
    llvm/test/CodeGen/Thumb2/mve-scatter-ind16-unscaled.ll
    llvm/test/CodeGen/Thumb2/mve-scatter-ind32-scaled.ll
    llvm/test/CodeGen/Thumb2/mve-scatter-ind32-unscaled.ll
    llvm/test/CodeGen/Thumb2/mve-scatter-ind8-unscaled.ll
    llvm/test/CodeGen/Thumb2/mve-scatter-ptrs.ll
    llvm/test/CodeGen/Thumb2/mve-sext-masked-load.ll
    llvm/test/CodeGen/Thumb2/mve-shifts-scalar.ll
    llvm/test/CodeGen/Thumb2/mve-stack.ll
    llvm/test/CodeGen/Thumb2/mve-tailpred-loopinvariant.ll
    llvm/test/CodeGen/Thumb2/mve-vabd.ll
    llvm/test/CodeGen/Thumb2/mve-vabdus.ll
    llvm/test/CodeGen/Thumb2/mve-vctp.ll
    llvm/test/CodeGen/Thumb2/mve-vcvt16.ll
    llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
    llvm/test/CodeGen/Thumb2/mve-vecreduce-slp.ll
    llvm/test/CodeGen/Thumb2/mve-vector-spill.ll
    llvm/test/CodeGen/Thumb2/mve-vhadd.ll
    llvm/test/CodeGen/Thumb2/mve-vld2-post.ll
    llvm/test/CodeGen/Thumb2/mve-vld2.ll
    llvm/test/CodeGen/Thumb2/mve-vld3.ll
    llvm/test/CodeGen/Thumb2/mve-vld4-post.ll
    llvm/test/CodeGen/Thumb2/mve-vld4.ll
    llvm/test/CodeGen/Thumb2/mve-vldshuffle.ll
    llvm/test/CodeGen/Thumb2/mve-vldst4.ll
    llvm/test/CodeGen/Thumb2/mve-vmaxnma-tailpred.ll
    llvm/test/CodeGen/Thumb2/mve-vmla.ll
    llvm/test/CodeGen/Thumb2/mve-vmovnstore.ll
    llvm/test/CodeGen/Thumb2/mve-vmulh.ll
    llvm/test/CodeGen/Thumb2/mve-vmull-loop.ll
    llvm/test/CodeGen/Thumb2/mve-vqdmulh-minmax.ll
    llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll
    llvm/test/CodeGen/Thumb2/mve-vst2-post.ll
    llvm/test/CodeGen/Thumb2/mve-vst2.ll
    llvm/test/CodeGen/Thumb2/mve-vst3.ll
    llvm/test/CodeGen/Thumb2/mve-vst4-post.ll
    llvm/test/CodeGen/Thumb2/mve-vst4.ll
    llvm/test/CodeGen/Thumb2/mve-widen-narrow.ll
    llvm/test/CodeGen/Thumb2/mve-zext-masked-load.ll
    llvm/test/CodeGen/Thumb2/pacbti-m-indirect-tail-call.ll
    llvm/test/CodeGen/Thumb2/pacbti-m-outliner-1.ll
    llvm/test/CodeGen/Thumb2/pacbti-m-outliner-2.ll
    llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll
    llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll
    llvm/test/CodeGen/Thumb2/pacbti-m-overalign.ll
    llvm/test/CodeGen/Thumb2/pacbti-m-unsupported-arch.ll
    llvm/test/CodeGen/Thumb2/pacbti-m-varargs-1.ll
    llvm/test/CodeGen/Thumb2/pacbti-m-varargs-2.ll
    llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
    llvm/test/CodeGen/Thumb2/pic-load.ll
    llvm/test/CodeGen/Thumb2/pr52817.ll
    llvm/test/CodeGen/Thumb2/schedm7-hazard.ll
    llvm/test/CodeGen/Thumb2/segmented-stacks.ll
    llvm/test/CodeGen/Thumb2/setjmp_longjmp.ll
    llvm/test/CodeGen/Thumb2/shift_parts.ll
    llvm/test/CodeGen/Thumb2/stack_guard_remat.ll
    llvm/test/CodeGen/Thumb2/tail-call-r9.ll
    llvm/test/CodeGen/Thumb2/thumb2-branch.ll
    llvm/test/CodeGen/Thumb2/thumb2-call-tc.ll
    llvm/test/CodeGen/Thumb2/thumb2-call.ll
    llvm/test/CodeGen/Thumb2/thumb2-cbnz.ll
    llvm/test/CodeGen/Thumb2/thumb2-cpsr-liveness.ll
    llvm/test/CodeGen/Thumb2/thumb2-execute-only-prologue.ll
    llvm/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll
    llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll
    llvm/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
    llvm/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
    llvm/test/CodeGen/Thumb2/thumb2-ldm.ll
    llvm/test/CodeGen/Thumb2/thumb2-ldr.ll
    llvm/test/CodeGen/Thumb2/thumb2-ldr_ext.ll
    llvm/test/CodeGen/Thumb2/thumb2-ldr_post.ll
    llvm/test/CodeGen/Thumb2/thumb2-ldr_pre.ll
    llvm/test/CodeGen/Thumb2/thumb2-ldrb.ll
    llvm/test/CodeGen/Thumb2/thumb2-ldrd.ll
    llvm/test/CodeGen/Thumb2/thumb2-ldrh.ll
    llvm/test/CodeGen/Thumb2/thumb2-mul.ll
    llvm/test/CodeGen/Thumb2/thumb2-smul.ll
    llvm/test/CodeGen/Thumb2/thumb2-spill-q.ll
    llvm/test/CodeGen/Thumb2/thumb2-str.ll
    llvm/test/CodeGen/Thumb2/thumb2-str_post.ll
    llvm/test/CodeGen/Thumb2/thumb2-str_pre.ll
    llvm/test/CodeGen/Thumb2/thumb2-strb.ll
    llvm/test/CodeGen/Thumb2/thumb2-strh.ll
    llvm/test/CodeGen/Thumb2/thumb2-tbh.ll
    llvm/test/CodeGen/Thumb2/tls1.ll
    llvm/test/CodeGen/Thumb2/tls2.ll
    llvm/test/CodeGen/Thumb2/tpsoft.ll
    llvm/test/CodeGen/Thumb2/unreachable-large-offset-gep.ll
    llvm/test/CodeGen/Thumb2/v8_IT_1.ll
    llvm/test/CodeGen/Thumb2/v8_IT_2.ll
    llvm/test/CodeGen/Thumb2/v8_IT_3.ll
    llvm/test/CodeGen/Thumb2/v8_IT_4.ll
    llvm/test/CodeGen/Thumb2/v8_IT_5.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll b/llvm/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll
index f76c8ff4d0f8e..d9f72daf54bf2 100644
--- a/llvm/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll
@@ -4,9 +4,9 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-
 target triple = "thumbv6t2-elf"
 	%struct.dwarf_cie = type <{ i32, i32, i8, [0 x i8], [3 x i8] }>
 
-declare i8* @read_sleb128(i8*, i32* nocapture) nounwind
+declare ptr @read_sleb128(ptr, ptr nocapture) nounwind
 
-define i32 @get_cie_encoding(%struct.dwarf_cie* %cie) nounwind {
+define i32 @get_cie_encoding(ptr %cie) nounwind {
 entry:
 	br i1 undef, label %bb1, label %bb13
 
@@ -17,8 +17,8 @@ bb1:		; preds = %entry
 bb.i:		; preds = %bb.i, %bb1
 	%indvar.i = phi i32 [ 0, %bb1 ], [ %2, %bb.i ]		; <i32> [#uses=3]
 	%tmp39 = add i32 %indvar.i, %tmp38		; <i32> [#uses=1]
-	%p_addr.0.i = getelementptr i8, i8* undef, i32 %tmp39		; <i8*> [#uses=1]
-	%0 = load i8, i8* %p_addr.0.i, align 1		; <i8> [#uses=1]
+	%p_addr.0.i = getelementptr i8, ptr undef, i32 %tmp39		; <ptr> [#uses=1]
+	%0 = load i8, ptr %p_addr.0.i, align 1		; <i8> [#uses=1]
 	%1 = icmp slt i8 %0, 0		; <i1> [#uses=1]
 	%2 = add i32 %indvar.i, 1		; <i32> [#uses=1]
 	br i1 %1, label %bb.i, label %read_uleb128.exit
@@ -26,8 +26,8 @@ bb.i:		; preds = %bb.i, %bb1
 read_uleb128.exit:		; preds = %bb.i
 	%.sum40 = add i32 %indvar.i, undef		; <i32> [#uses=1]
 	%.sum31 = add i32 %.sum40, 2		; <i32> [#uses=1]
-	%scevgep.i = getelementptr %struct.dwarf_cie, %struct.dwarf_cie* %cie, i32 0, i32 3, i32 %.sum31		; <i8*> [#uses=1]
-	%3 = call  i8* @read_sleb128(i8* %scevgep.i, i32* undef)		; <i8*> [#uses=0]
+	%scevgep.i = getelementptr %struct.dwarf_cie, ptr %cie, i32 0, i32 3, i32 %.sum31		; <ptr> [#uses=1]
+	%3 = call  ptr @read_sleb128(ptr %scevgep.i, ptr undef)		; <ptr> [#uses=0]
 	unreachable
 
 bb13:		; preds = %entry

diff --git a/llvm/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll b/llvm/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll
index 1d5d038663a5c..62f324cd41f10 100644
--- a/llvm/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll
@@ -1,36 +1,33 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-ios -mattr=+vfp2,+thumb2 | FileCheck %s
 ; rdar://7076238
 
-@"\01LC" = external constant [36 x i8], align 1		; <[36 x i8]*> [#uses=1]
+@"\01LC" = external constant [36 x i8], align 1		; <ptr> [#uses=1]
 
 define i32 @t(i32, ...) nounwind "frame-pointer"="all" {
 entry:
 ; CHECK-LABEL: t:
 ; CHECK: add r7, sp, #12
-	%1 = load i8*, i8** undef, align 4		; <i8*> [#uses=3]
-	%2 = getelementptr i8, i8* %1, i32 4		; <i8*> [#uses=1]
-	%3 = getelementptr i8, i8* %1, i32 8		; <i8*> [#uses=1]
-	%4 = bitcast i8* %2 to i32*		; <i32*> [#uses=1]
-	%5 = load i32, i32* %4, align 4		; <i32> [#uses=1]
-	%6 = trunc i32 %5 to i8		; <i8> [#uses=1]
-	%7 = getelementptr i8, i8* %1, i32 12		; <i8*> [#uses=1]
-	%8 = bitcast i8* %3 to i32*		; <i32*> [#uses=1]
-	%9 = load i32, i32* %8, align 4		; <i32> [#uses=1]
+	%1 = load ptr, ptr undef, align 4		; <ptr> [#uses=3]
+	%2 = getelementptr i8, ptr %1, i32 4		; <ptr> [#uses=1]
+	%3 = getelementptr i8, ptr %1, i32 8		; <ptr> [#uses=1]
+	%4 = load i32, ptr %2, align 4		; <i32> [#uses=1]
+	%5 = trunc i32 %4 to i8		; <i8> [#uses=1]
+	%6 = getelementptr i8, ptr %1, i32 12		; <ptr> [#uses=1]
+	%7 = load i32, ptr %3, align 4		; <i32> [#uses=1]
+	%8 = trunc i32 %7 to i16		; <i16> [#uses=1]
+	%9 = load i32, ptr %6, align 4		; <i32> [#uses=1]
 	%10 = trunc i32 %9 to i16		; <i16> [#uses=1]
-	%11 = bitcast i8* %7 to i32*		; <i32*> [#uses=1]
-	%12 = load i32, i32* %11, align 4		; <i32> [#uses=1]
-	%13 = trunc i32 %12 to i16		; <i16> [#uses=1]
-	%14 = load i32, i32* undef, align 4		; <i32> [#uses=2]
-	%15 = sext i8 %6 to i32		; <i32> [#uses=2]
-	%16 = sext i16 %10 to i32		; <i32> [#uses=2]
-	%17 = sext i16 %13 to i32		; <i32> [#uses=2]
-	%18 = call  i32 (i8*, ...) @printf(i8* getelementptr ([36 x i8], [36 x i8]* @"\01LC", i32 0, i32 0), i32 -128, i32 0, i32 %15, i32 %16, i32 %17, i32 0, i32 %14) nounwind		; <i32> [#uses=0]
-	%19 = add i32 0, %15		; <i32> [#uses=1]
-	%20 = add i32 %19, %16		; <i32> [#uses=1]
-	%21 = add i32 %20, %14		; <i32> [#uses=1]
-	%22 = add i32 %21, %17		; <i32> [#uses=1]
-	%23 = add i32 %22, 0		; <i32> [#uses=1]
-	ret i32 %23
+	%11 = load i32, ptr undef, align 4		; <i32> [#uses=2]
+	%12 = sext i8 %5 to i32		; <i32> [#uses=2]
+	%13 = sext i16 %8 to i32		; <i32> [#uses=2]
+	%14 = sext i16 %10 to i32		; <i32> [#uses=2]
+	%15 = call  i32 (ptr, ...) @printf(ptr @"\01LC", i32 -128, i32 0, i32 %12, i32 %13, i32 %14, i32 0, i32 %11) nounwind		; <i32> [#uses=0]
+	%16 = add i32 0, %12		; <i32> [#uses=1]
+	%17 = add i32 %16, %13		; <i32> [#uses=1]
+	%18 = add i32 %17, %11		; <i32> [#uses=1]
+	%19 = add i32 %18, %14		; <i32> [#uses=1]
+	%20 = add i32 %19, 0		; <i32> [#uses=1]
+	ret i32 %20
 }
 
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind

diff --git a/llvm/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll b/llvm/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll
index 1b4dbb326194b..8b1d29c82289d 100644
--- a/llvm/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll
@@ -1,65 +1,64 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -frame-pointer=all
 
-	%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
+	%struct.FILE = type { ptr, i32, i32, i16, i16, %struct.__sbuf, i32, ptr, ptr, ptr, ptr, ptr, %struct.__sbuf, ptr, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
 	%struct.JHUFF_TBL = type { [17 x i8], [256 x i8], i32 }
 	%struct.JQUANT_TBL = type { [64 x i16], i32 }
 	%struct.__sFILEX = type opaque
-	%struct.__sbuf = type { i8*, i32 }
+	%struct.__sbuf = type { ptr, i32 }
 	%struct.anon = type { [8 x i32], [48 x i8] }
-	%struct.backing_store_info = type { void (%struct.jpeg_common_struct*, %struct.backing_store_info*, i8*, i32, i32)*, void (%struct.jpeg_common_struct*, %struct.backing_store_info*, i8*, i32, i32)*, void (%struct.jpeg_common_struct*, %struct.backing_store_info*)*, %struct.FILE*, [64 x i8] }
-	%struct.jpeg_color_deconverter = type { void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*, i8***, i32, i8**, i32)* }
-	%struct.jpeg_color_quantizer = type { void (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*, i8**, i8**, i32)*, void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)* }
-	%struct.jpeg_common_struct = type { %struct.jpeg_error_mgr*, %struct.jpeg_memory_mgr*, %struct.jpeg_progress_mgr*, i32, i32 }
-	%struct.jpeg_component_info = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.JQUANT_TBL*, i8* }
-	%struct.jpeg_d_coef_controller = type { void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*, i8***)*, %struct.jvirt_barray_control** }
-	%struct.jpeg_d_main_controller = type { void (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*, i8**, i32*, i32)* }
-	%struct.jpeg_d_post_controller = type { void (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*, i8***, i32*, i32, i8**, i32*, i32)* }
-	%struct.jpeg_decomp_master = type { void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, i32 }
-	%struct.jpeg_decompress_struct = type { %struct.jpeg_error_mgr*, %struct.jpeg_memory_mgr*, %struct.jpeg_progress_mgr*, i32, i32, %struct.jpeg_source_mgr*, i32, i32, i32, i32, i32, i32, i32, double, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8**, i32, i32, i32, i32, i32, [64 x i32]*, [4 x %struct.JQUANT_TBL*], [4 x %struct.JHUFF_TBL*], [4 x %struct.JHUFF_TBL*], i32, %struct.jpeg_component_info*, i32, i32, [16 x i8], [16 x i8], [16 x i8], i32, i32, i8, i16, i16, i32, i8, i32, i32, i32, i32, i32, i8*, i32, [4 x %struct.jpeg_component_info*], i32, i32, i32, [10 x i32], i32, i32, i32, i32, i32, %struct.jpeg_decomp_master*, %struct.jpeg_d_main_controller*, %struct.jpeg_d_coef_controller*, %struct.jpeg_d_post_controller*, %struct.jpeg_input_controller*, %struct.jpeg_marker_reader*, %struct.jpeg_entropy_decoder*, %struct.jpeg_inverse_dct*, %struct.jpeg_upsampler*, %struct.jpeg_color_deconverter*, %struct.jpeg_color_quantizer* }
-	%struct.jpeg_entropy_decoder = type { void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*, [64 x i16]**)* }
-	%struct.jpeg_error_mgr = type { void (%struct.jpeg_common_struct*)*, void (%struct.jpeg_common_struct*, i32)*, void (%struct.jpeg_common_struct*)*, void (%struct.jpeg_common_struct*, i8*)*, void (%struct.jpeg_common_struct*)*, i32, %struct.anon, i32, i32, i8**, i32, i8**, i32, i32 }
-	%struct.jpeg_input_controller = type { i32 (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, i32, i32 }
-	%struct.jpeg_inverse_dct = type { void (%struct.jpeg_decompress_struct*)*, [10 x void (%struct.jpeg_decompress_struct*, %struct.jpeg_component_info*, i16*, i8**, i32)*] }
-	%struct.jpeg_marker_reader = type { void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, [16 x i32 (%struct.jpeg_decompress_struct*)*], i32, i32, i32, i32 }
-	%struct.jpeg_memory_mgr = type { i8* (%struct.jpeg_common_struct*, i32, i32)*, i8* (%struct.jpeg_common_struct*, i32, i32)*, i8** (%struct.jpeg_common_struct*, i32, i32, i32)*, [64 x i16]** (%struct.jpeg_common_struct*, i32, i32, i32)*, %struct.jvirt_sarray_control* (%struct.jpeg_common_struct*, i32, i32, i32, i32, i32)*, %struct.jvirt_barray_control* (%struct.jpeg_common_struct*, i32, i32, i32, i32, i32)*, void (%struct.jpeg_common_struct*)*, i8** (%struct.jpeg_common_struct*, %struct.jvirt_sarray_control*, i32, i32, i32)*, [64 x i16]** (%struct.jpeg_common_struct*, %struct.jvirt_barray_control*, i32, i32, i32)*, void (%struct.jpeg_common_struct*, i32)*, void (%struct.jpeg_common_struct*)*, i32 }
-	%struct.jpeg_progress_mgr = type { void (%struct.jpeg_common_struct*)*, i32, i32, i32, i32 }
-	%struct.jpeg_source_mgr = type { i8*, i32, void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*, i32)*, i32 (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*)* }
-	%struct.jpeg_upsampler = type { void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*, i8***, i32*, i32, i8**, i32*, i32)*, i32 }
-	%struct.jvirt_barray_control = type { [64 x i16]**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.jvirt_barray_control*, %struct.backing_store_info }
-	%struct.jvirt_sarray_control = type { i8**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.jvirt_sarray_control*, %struct.backing_store_info }
+	%struct.backing_store_info = type { ptr, ptr, ptr, ptr, [64 x i8] }
+	%struct.jpeg_color_deconverter = type { ptr, ptr }
+	%struct.jpeg_color_quantizer = type { ptr, ptr, ptr, ptr }
+	%struct.jpeg_common_struct = type { ptr, ptr, ptr, i32, i32 }
+	%struct.jpeg_component_info = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr }
+	%struct.jpeg_d_coef_controller = type { ptr, ptr, ptr, ptr, ptr }
+	%struct.jpeg_d_main_controller = type { ptr, ptr }
+	%struct.jpeg_d_post_controller = type { ptr, ptr }
+	%struct.jpeg_decomp_master = type { ptr, ptr, i32 }
+	%struct.jpeg_decompress_struct = type { ptr, ptr, ptr, i32, i32, ptr, i32, i32, i32, i32, i32, i32, i32, double, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, i32, i32, i32, i32, i32, ptr, [4 x ptr], [4 x ptr], [4 x ptr], i32, ptr, i32, i32, [16 x i8], [16 x i8], [16 x i8], i32, i32, i8, i16, i16, i32, i8, i32, i32, i32, i32, i32, ptr, i32, [4 x ptr], i32, i32, i32, [10 x i32], i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+	%struct.jpeg_entropy_decoder = type { ptr, ptr }
+	%struct.jpeg_error_mgr = type { ptr, ptr, ptr, ptr, ptr, i32, %struct.anon, i32, i32, ptr, i32, ptr, i32, i32 }
+	%struct.jpeg_input_controller = type { ptr, ptr, ptr, ptr, i32, i32 }
+	%struct.jpeg_inverse_dct = type { ptr, [10 x ptr] }
+	%struct.jpeg_marker_reader = type { ptr, ptr, ptr, ptr, [16 x ptr], i32, i32, i32, i32 }
+	%struct.jpeg_memory_mgr = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32 }
+	%struct.jpeg_progress_mgr = type { ptr, i32, i32, i32, i32 }
+	%struct.jpeg_source_mgr = type { ptr, i32, ptr, ptr, ptr, ptr, ptr }
+	%struct.jpeg_upsampler = type { ptr, ptr, i32 }
+	%struct.jvirt_barray_control = type { ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, %struct.backing_store_info }
+	%struct.jvirt_sarray_control = type { ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, %struct.backing_store_info }
 
-define void @jpeg_idct_float(%struct.jpeg_decompress_struct* nocapture %cinfo, %struct.jpeg_component_info* nocapture %compptr, i16* nocapture %coef_block, i8** nocapture %output_buf, i32 %output_col) nounwind {
+define void @jpeg_idct_float(ptr nocapture %cinfo, ptr nocapture %compptr, ptr nocapture %coef_block, ptr nocapture %output_buf, i32 %output_col) nounwind {
 entry:
-	%workspace = alloca [64 x float], align 4		; <[64 x float]*> [#uses=11]
-	%0 = load i8*, i8** undef, align 4		; <i8*> [#uses=5]
+	%workspace = alloca [64 x float], align 4		; <ptr> [#uses=11]
+	%0 = load ptr, ptr undef, align 4		; <ptr> [#uses=5]
 	br label %bb
 
 bb:		; preds = %bb, %entry
 	%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ]		; <i32> [#uses=11]
 	%tmp39 = add i32 %indvar, 8		; <i32> [#uses=0]
 	%tmp41 = add i32 %indvar, 16		; <i32> [#uses=2]
-	%scevgep42 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp41		; <float*> [#uses=1]
+	%scevgep42 = getelementptr [64 x float], ptr %workspace, i32 0, i32 %tmp41		; <ptr> [#uses=1]
 	%tmp43 = add i32 %indvar, 24		; <i32> [#uses=1]
-	%scevgep44 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp43		; <float*> [#uses=1]
+	%scevgep44 = getelementptr [64 x float], ptr %workspace, i32 0, i32 %tmp43		; <ptr> [#uses=1]
 	%tmp45 = add i32 %indvar, 32		; <i32> [#uses=1]
-	%scevgep46 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp45		; <float*> [#uses=1]
+	%scevgep46 = getelementptr [64 x float], ptr %workspace, i32 0, i32 %tmp45		; <ptr> [#uses=1]
 	%tmp47 = add i32 %indvar, 40		; <i32> [#uses=1]
-	%scevgep48 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp47		; <float*> [#uses=1]
+	%scevgep48 = getelementptr [64 x float], ptr %workspace, i32 0, i32 %tmp47		; <ptr> [#uses=1]
 	%tmp49 = add i32 %indvar, 48		; <i32> [#uses=1]
-	%scevgep50 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp49		; <float*> [#uses=1]
+	%scevgep50 = getelementptr [64 x float], ptr %workspace, i32 0, i32 %tmp49		; <ptr> [#uses=1]
 	%tmp51 = add i32 %indvar, 56		; <i32> [#uses=1]
-	%scevgep52 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp51		; <float*> [#uses=1]
-	%wsptr.119 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %indvar		; <float*> [#uses=1]
+	%scevgep52 = getelementptr [64 x float], ptr %workspace, i32 0, i32 %tmp51		; <ptr> [#uses=1]
+	%wsptr.119 = getelementptr [64 x float], ptr %workspace, i32 0, i32 %indvar		; <ptr> [#uses=1]
 	%tmp54 = shl i32 %indvar, 2		; <i32> [#uses=1]
-	%scevgep76 = getelementptr i8, i8* undef, i32 %tmp54		; <i8*> [#uses=1]
-	%quantptr.118 = bitcast i8* %scevgep76 to float*		; <float*> [#uses=1]
-	%scevgep79 = getelementptr i16, i16* %coef_block, i32 %tmp41		; <i16*> [#uses=0]
-	%inptr.117 = getelementptr i16, i16* %coef_block, i32 %indvar		; <i16*> [#uses=1]
-	%1 = load i16, i16* null, align 2		; <i16> [#uses=1]
-	%2 = load i16, i16* undef, align 2		; <i16> [#uses=1]
-	%3 = load i16, i16* %inptr.117, align 2		; <i16> [#uses=1]
+	%scevgep76 = getelementptr i8, ptr undef, i32 %tmp54		; <ptr> [#uses=1]
+	%scevgep79 = getelementptr i16, ptr %coef_block, i32 %tmp41		; <ptr> [#uses=0]
+	%inptr.117 = getelementptr i16, ptr %coef_block, i32 %indvar		; <ptr> [#uses=1]
+	%1 = load i16, ptr null, align 2		; <i16> [#uses=1]
+	%2 = load i16, ptr undef, align 2		; <i16> [#uses=1]
+	%3 = load i16, ptr %inptr.117, align 2		; <i16> [#uses=1]
 	%4 = sitofp i16 %3 to float		; <float> [#uses=1]
-	%5 = load float, float* %quantptr.118, align 4		; <float> [#uses=1]
+	%5 = load float, ptr %scevgep76, align 4		; <float> [#uses=1]
 	%6 = fmul float %4, %5		; <float> [#uses=1]
 	%7 = fsub float %6, undef		; <float> [#uses=2]
 	%8 = fmul float undef, 0x3FF6A09E60000000		; <float> [#uses=1]
@@ -70,7 +69,7 @@ bb:		; preds = %bb, %entry
 	%13 = sitofp i16 %1 to float		; <float> [#uses=1]
 	%14 = fmul float %13, undef		; <float> [#uses=2]
 	%15 = sitofp i16 %2 to float		; <float> [#uses=1]
-	%16 = load float, float* undef, align 4		; <float> [#uses=1]
+	%16 = load float, ptr undef, align 4		; <float> [#uses=1]
 	%17 = fmul float %15, %16		; <float> [#uses=1]
 	%18 = fadd float %14, undef		; <float> [#uses=2]
 	%19 = fsub float %14, undef		; <float> [#uses=2]
@@ -88,20 +87,20 @@ bb:		; preds = %bb, %entry
 	%31 = fsub float %23, %30		; <float> [#uses=3]
 	%32 = fadd float %27, %31		; <float> [#uses=1]
 	%33 = fadd float %10, %21		; <float> [#uses=1]
-	store float %33, float* %wsptr.119, align 4
+	store float %33, ptr %wsptr.119, align 4
 	%34 = fsub float %10, %21		; <float> [#uses=1]
-	store float %34, float* %scevgep52, align 4
+	store float %34, ptr %scevgep52, align 4
 	%35 = fadd float %11, %30		; <float> [#uses=1]
-	store float %35, float* null, align 4
+	store float %35, ptr null, align 4
 	%36 = fsub float %11, %30		; <float> [#uses=1]
-	store float %36, float* %scevgep50, align 4
+	store float %36, ptr %scevgep50, align 4
 	%37 = fadd float %12, %31		; <float> [#uses=1]
-	store float %37, float* %scevgep42, align 4
+	store float %37, ptr %scevgep42, align 4
 	%38 = fsub float %12, %31		; <float> [#uses=1]
-	store float %38, float* %scevgep48, align 4
+	store float %38, ptr %scevgep48, align 4
 	%39 = fadd float undef, %32		; <float> [#uses=1]
-	store float %39, float* %scevgep46, align 4
-	store float undef, float* %scevgep44, align 4
+	store float %39, ptr %scevgep46, align 4
+	store float undef, ptr %scevgep44, align 4
 	%indvar.next = add i32 %indvar, 1		; <i32> [#uses=1]
 	br i1 undef, label %bb6, label %bb
 
@@ -114,22 +113,22 @@ bb6:		; preds = %bb
 
 bb8:		; preds = %bb8, %bb6
 	%ctr.116 = phi i32 [ 0, %bb6 ], [ %88, %bb8 ]		; <i32> [#uses=3]
-	%scevgep = getelementptr i8*, i8** %output_buf, i32 %ctr.116		; <i8**> [#uses=1]
+	%scevgep = getelementptr ptr, ptr %output_buf, i32 %ctr.116		; <ptr> [#uses=1]
 	%tmp = shl i32 %ctr.116, 3		; <i32> [#uses=5]
 	%tmp2392 = or i32 %tmp, 4		; <i32> [#uses=1]
-	%scevgep24 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp2392		; <float*> [#uses=1]
+	%scevgep24 = getelementptr [64 x float], ptr %workspace, i32 0, i32 %tmp2392		; <ptr> [#uses=1]
 	%tmp2591 = or i32 %tmp, 2		; <i32> [#uses=1]
-	%scevgep26 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp2591		; <float*> [#uses=1]
+	%scevgep26 = getelementptr [64 x float], ptr %workspace, i32 0, i32 %tmp2591		; <ptr> [#uses=1]
 	%tmp2790 = or i32 %tmp, 6		; <i32> [#uses=1]
-	%scevgep28 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp2790		; <float*> [#uses=1]
+	%scevgep28 = getelementptr [64 x float], ptr %workspace, i32 0, i32 %tmp2790		; <ptr> [#uses=1]
 	%tmp3586 = or i32 %tmp, 7		; <i32> [#uses=0]
-	%wsptr.215 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp		; <float*> [#uses=1]
-	%40 = load i8*, i8** %scevgep, align 4		; <i8*> [#uses=4]
-	%41 = load float, float* %wsptr.215, align 4		; <float> [#uses=1]
-	%42 = load float, float* %scevgep24, align 4		; <float> [#uses=1]
+	%wsptr.215 = getelementptr [64 x float], ptr %workspace, i32 0, i32 %tmp		; <ptr> [#uses=1]
+	%40 = load ptr, ptr %scevgep, align 4		; <ptr> [#uses=4]
+	%41 = load float, ptr %wsptr.215, align 4		; <float> [#uses=1]
+	%42 = load float, ptr %scevgep24, align 4		; <float> [#uses=1]
 	%43 = fadd float %41, %42		; <float> [#uses=1]
-	%44 = load float, float* %scevgep26, align 4		; <float> [#uses=1]
-	%45 = load float, float* %scevgep28, align 4		; <float> [#uses=1]
+	%44 = load float, ptr %scevgep26, align 4		; <float> [#uses=1]
+	%45 = load float, ptr %scevgep28, align 4		; <float> [#uses=1]
 	%46 = fadd float %44, %45		; <float> [#uses=1]
 	%47 = fsub float %43, %46		; <float> [#uses=2]
 	%48 = fsub float undef, 0.000000e+00		; <float> [#uses=1]
@@ -139,51 +138,51 @@ bb8:		; preds = %bb8, %bb6
 	%52 = lshr i32 %51, 3		; <i32> [#uses=1]
 	%53 = and i32 %52, 1023		; <i32> [#uses=1]
 	%.sum14 = add i32 %53, 128		; <i32> [#uses=1]
-	%54 = getelementptr i8, i8* %0, i32 %.sum14		; <i8*> [#uses=1]
-	%55 = load i8, i8* %54, align 1		; <i8> [#uses=1]
-	store i8 %55, i8* null, align 1
-	%56 = getelementptr i8, i8* %40, i32 %.sum10		; <i8*> [#uses=1]
-	store i8 0, i8* %56, align 1
-	%57 = load i8, i8* null, align 1		; <i8> [#uses=1]
-	%58 = getelementptr i8, i8* %40, i32 %.sum8		; <i8*> [#uses=1]
-	store i8 %57, i8* %58, align 1
+	%54 = getelementptr i8, ptr %0, i32 %.sum14		; <ptr> [#uses=1]
+	%55 = load i8, ptr %54, align 1		; <i8> [#uses=1]
+	store i8 %55, ptr null, align 1
+	%56 = getelementptr i8, ptr %40, i32 %.sum10		; <ptr> [#uses=1]
+	store i8 0, ptr %56, align 1
+	%57 = load i8, ptr null, align 1		; <i8> [#uses=1]
+	%58 = getelementptr i8, ptr %40, i32 %.sum8		; <ptr> [#uses=1]
+	store i8 %57, ptr %58, align 1
 	%59 = fadd float undef, %48		; <float> [#uses=1]
 	%60 = fptosi float %59 to i32		; <i32> [#uses=1]
 	%61 = add i32 %60, 4		; <i32> [#uses=1]
 	%62 = lshr i32 %61, 3		; <i32> [#uses=1]
 	%63 = and i32 %62, 1023		; <i32> [#uses=1]
 	%.sum7 = add i32 %63, 128		; <i32> [#uses=1]
-	%64 = getelementptr i8, i8* %0, i32 %.sum7		; <i8*> [#uses=1]
-	%65 = load i8, i8* %64, align 1		; <i8> [#uses=1]
-	%66 = getelementptr i8, i8* %40, i32 %.sum6		; <i8*> [#uses=1]
-	store i8 %65, i8* %66, align 1
+	%64 = getelementptr i8, ptr %0, i32 %.sum7		; <ptr> [#uses=1]
+	%65 = load i8, ptr %64, align 1		; <i8> [#uses=1]
+	%66 = getelementptr i8, ptr %40, i32 %.sum6		; <ptr> [#uses=1]
+	store i8 %65, ptr %66, align 1
 	%67 = fptosi float undef to i32		; <i32> [#uses=1]
 	%68 = add i32 %67, 4		; <i32> [#uses=1]
 	%69 = lshr i32 %68, 3		; <i32> [#uses=1]
 	%70 = and i32 %69, 1023		; <i32> [#uses=1]
 	%.sum5 = add i32 %70, 128		; <i32> [#uses=1]
-	%71 = getelementptr i8, i8* %0, i32 %.sum5		; <i8*> [#uses=1]
-	%72 = load i8, i8* %71, align 1		; <i8> [#uses=1]
-	store i8 %72, i8* undef, align 1
+	%71 = getelementptr i8, ptr %0, i32 %.sum5		; <ptr> [#uses=1]
+	%72 = load i8, ptr %71, align 1		; <i8> [#uses=1]
+	store i8 %72, ptr undef, align 1
 	%73 = fadd float %47, undef		; <float> [#uses=1]
 	%74 = fptosi float %73 to i32		; <i32> [#uses=1]
 	%75 = add i32 %74, 4		; <i32> [#uses=1]
 	%76 = lshr i32 %75, 3		; <i32> [#uses=1]
 	%77 = and i32 %76, 1023		; <i32> [#uses=1]
 	%.sum3 = add i32 %77, 128		; <i32> [#uses=1]
-	%78 = getelementptr i8, i8* %0, i32 %.sum3		; <i8*> [#uses=1]
-	%79 = load i8, i8* %78, align 1		; <i8> [#uses=1]
-	store i8 %79, i8* undef, align 1
+	%78 = getelementptr i8, ptr %0, i32 %.sum3		; <ptr> [#uses=1]
+	%79 = load i8, ptr %78, align 1		; <i8> [#uses=1]
+	store i8 %79, ptr undef, align 1
 	%80 = fsub float %47, undef		; <float> [#uses=1]
 	%81 = fptosi float %80 to i32		; <i32> [#uses=1]
 	%82 = add i32 %81, 4		; <i32> [#uses=1]
 	%83 = lshr i32 %82, 3		; <i32> [#uses=1]
 	%84 = and i32 %83, 1023		; <i32> [#uses=1]
 	%.sum1 = add i32 %84, 128		; <i32> [#uses=1]
-	%85 = getelementptr i8, i8* %0, i32 %.sum1		; <i8*> [#uses=1]
-	%86 = load i8, i8* %85, align 1		; <i8> [#uses=1]
-	%87 = getelementptr i8, i8* %40, i32 %.sum		; <i8*> [#uses=1]
-	store i8 %86, i8* %87, align 1
+	%85 = getelementptr i8, ptr %0, i32 %.sum1		; <ptr> [#uses=1]
+	%86 = load i8, ptr %85, align 1		; <i8> [#uses=1]
+	%87 = getelementptr i8, ptr %40, i32 %.sum		; <ptr> [#uses=1]
+	store i8 %86, ptr %87, align 1
 	%88 = add i32 %ctr.116, 1		; <i32> [#uses=2]
 	%exitcond = icmp eq i32 %88, 8		; <i1> [#uses=1]
 	br i1 %exitcond, label %return, label %bb8

diff --git a/llvm/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll b/llvm/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
index 1eafa5b2042c6..296da07180b5e 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
@@ -1,18 +1,18 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -frame-pointer=all -arm-atomic-cfg-tidy=0 | FileCheck %s
 
-@csize = external global [100 x [20 x [4 x i8]]]		; <[100 x [20 x [4 x i8]]]*> [#uses=1]
-@vsize = external global [100 x [20 x [4 x i8]]]		; <[100 x [20 x [4 x i8]]]*> [#uses=1]
-@cll = external global [20 x [10 x i8]]		; <[20 x [10 x i8]]*> [#uses=1]
-@lefline = external global [100 x [20 x i32]]		; <[100 x [20 x i32]]*> [#uses=1]
-@sep = external global [20 x i32]		; <[20 x i32]*> [#uses=1]
+@csize = external global [100 x [20 x [4 x i8]]]		; <ptr> [#uses=1]
+@vsize = external global [100 x [20 x [4 x i8]]]		; <ptr> [#uses=1]
+@cll = external global [20 x [10 x i8]]		; <ptr> [#uses=1]
+@lefline = external global [100 x [20 x i32]]		; <ptr> [#uses=1]
+@sep = external global [20 x i32]		; <ptr> [#uses=1]
 
-define void @main(i32 %argc, i8** %argv) noreturn nounwind {
+define void @main(i32 %argc, ptr %argv) noreturn nounwind {
 ; CHECK-LABEL: main:
 ; CHECK: ldrb
 entry:
-	%nb.i.i.i = alloca [25 x i8], align 1		; <[25 x i8]*> [#uses=0]
-	%line.i.i.i = alloca [200 x i8], align 1		; <[200 x i8]*> [#uses=1]
-	%line.i = alloca [1024 x i8], align 1		; <[1024 x i8]*> [#uses=0]
+	%nb.i.i.i = alloca [25 x i8], align 1		; <ptr> [#uses=0]
+	%line.i.i.i = alloca [200 x i8], align 1		; <ptr> [#uses=1]
+	%line.i = alloca [1024 x i8], align 1		; <ptr> [#uses=0]
 	br i1 undef, label %bb.i.i, label %bb4.preheader.i
 
 bb.i.i:		; preds = %entry
@@ -22,7 +22,7 @@ bb4.preheader.i:		; preds = %entry
 	br i1 undef, label %tbl.exit, label %bb.i.preheader
 
 bb.i.preheader:		; preds = %bb4.preheader.i
-	%line3.i.i.i = getelementptr [200 x i8], [200 x i8]* %line.i.i.i, i32 0, i32 0		; <i8*> [#uses=1]
+	%line3.i.i.i = getelementptr [200 x i8], ptr %line.i.i.i, i32 0, i32 0		; <ptr> [#uses=1]
 	br label %bb.i
 
 bb.i:		; preds = %bb4.backedge.i, %bb.i.preheader
@@ -38,7 +38,7 @@ bb2.i184.i.i:		; preds = %bb.i183.i.i, %bb3.i
 	br i1 undef, label %bb5.i185.i.i, label %bb35.preheader.i.i.i
 
 bb35.preheader.i.i.i:		; preds = %bb2.i184.i.i
-	%0 = load i8, i8* %line3.i.i.i, align 1		; <i8> [#uses=1]
+	%0 = load i8, ptr %line3.i.i.i, align 1		; <i8> [#uses=1]
 	%1 = icmp eq i8 %0, 59		; <i1> [#uses=1]
 	br i1 %1, label %bb36.i.i.i, label %bb9.i186.i.i
 
@@ -53,19 +53,19 @@ bb36.i.i.i:		; preds = %bb35.preheader.i.i.i
 
 bb.i171.i.i:		; preds = %bb3.i176.i.i, %bb36.i.i.i, %bb5.i185.i.i
 	%2 = phi i32 [ %4, %bb3.i176.i.i ], [ 0, %bb36.i.i.i ], [ 0, %bb5.i185.i.i ]		; <i32> [#uses=6]
-	%scevgep16.i.i.i = getelementptr [20 x i32], [20 x i32]* @sep, i32 0, i32 %2		; <i32*> [#uses=1]
-	%scevgep18.i.i.i = getelementptr [20 x [10 x i8]], [20 x [10 x i8]]* @cll, i32 0, i32 %2, i32 0		; <i8*> [#uses=0]
-	store i32 -1, i32* %scevgep16.i.i.i, align 4
+	%scevgep16.i.i.i = getelementptr [20 x i32], ptr @sep, i32 0, i32 %2		; <ptr> [#uses=1]
+	%scevgep18.i.i.i = getelementptr [20 x [10 x i8]], ptr @cll, i32 0, i32 %2, i32 0		; <ptr> [#uses=0]
+	store i32 -1, ptr %scevgep16.i.i.i, align 4
 	br label %bb1.i175.i.i
 
 bb1.i175.i.i:		; preds = %bb1.i175.i.i, %bb.i171.i.i
 	%i.03.i172.i.i = phi i32 [ 0, %bb.i171.i.i ], [ %3, %bb1.i175.i.i ]		; <i32> [#uses=4]
-	%scevgep11.i.i.i = getelementptr [100 x [20 x i32]], [100 x [20 x i32]]* @lefline, i32 0, i32 %i.03.i172.i.i, i32 %2		; <i32*> [#uses=1]
-	%scevgep12.i.i.i = getelementptr [100 x [20 x [4 x i8]]], [100 x [20 x [4 x i8]]]* @vsize, i32 0, i32 %i.03.i172.i.i, i32 %2, i32 0		; <i8*> [#uses=1]
-	%scevgep13.i.i.i = getelementptr [100 x [20 x [4 x i8]]], [100 x [20 x [4 x i8]]]* @csize, i32 0, i32 %i.03.i172.i.i, i32 %2, i32 0		; <i8*> [#uses=0]
-	store i8 0, i8* %scevgep12.i.i.i, align 1
-	store i32 0, i32* %scevgep11.i.i.i, align 4
-	store i32 108, i32* undef, align 4
+	%scevgep11.i.i.i = getelementptr [100 x [20 x i32]], ptr @lefline, i32 0, i32 %i.03.i172.i.i, i32 %2		; <ptr> [#uses=1]
+	%scevgep12.i.i.i = getelementptr [100 x [20 x [4 x i8]]], ptr @vsize, i32 0, i32 %i.03.i172.i.i, i32 %2, i32 0		; <ptr> [#uses=1]
+	%scevgep13.i.i.i = getelementptr [100 x [20 x [4 x i8]]], ptr @csize, i32 0, i32 %i.03.i172.i.i, i32 %2, i32 0		; <ptr> [#uses=0]
+	store i8 0, ptr %scevgep12.i.i.i, align 1
+	store i32 0, ptr %scevgep11.i.i.i, align 4
+	store i32 108, ptr undef, align 4
 	%3 = add i32 %i.03.i172.i.i, 1		; <i32> [#uses=2]
 	%exitcond.i174.i.i = icmp eq i32 %3, 100		; <i1> [#uses=1]
 	br i1 %exitcond.i174.i.i, label %bb3.i176.i.i, label %bb1.i175.i.i

diff --git a/llvm/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll b/llvm/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll
index 85d8d5c51d77d..a0c71bc7b58c7 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll
@@ -1,46 +1,45 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -frame-pointer=all
 
-	%0 = type { void (%"struct.xalanc_1_8::FormatterToXML"*, i16)*, i32 }		; type %0
-	%1 = type { void (%"struct.xalanc_1_8::FormatterToXML"*, i16*)*, i32 }		; type %1
-	%2 = type { void (%"struct.xalanc_1_8::FormatterToXML"*, %"struct.xalanc_1_8::XalanDOMString"*)*, i32 }		; type %2
-	%3 = type { void (%"struct.xalanc_1_8::FormatterToXML"*, i16*, i32, i32)*, i32 }		; type %3
-	%4 = type { void (%"struct.xalanc_1_8::FormatterToXML"*)*, i32 }		; type %4
+	%0 = type { ptr, i32 }		; type %0
+	%1 = type { ptr, i32 }		; type %1
+	%2 = type { ptr, i32 }		; type %2
+	%3 = type { ptr, i32 }		; type %3
+	%4 = type { ptr, i32 }		; type %4
 	%"struct.std::CharVectorType" = type { %"struct.std::_Vector_base<char,std::allocator<char> >" }
 	%"struct.std::_Bit_const_iterator" = type { %"struct.std::_Bit_iterator_base" }
-	%"struct.std::_Bit_iterator_base" = type { i32*, i32 }
+	%"struct.std::_Bit_iterator_base" = type { ptr, i32 }
 	%"struct.std::_Bvector_base<std::allocator<bool> >" = type { %"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl" }
-	%"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl" = type { %"struct.std::_Bit_const_iterator", %"struct.std::_Bit_const_iterator", i32* }
+	%"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl" = type { %"struct.std::_Bit_const_iterator", %"struct.std::_Bit_const_iterator", ptr }
 	%"struct.std::_Vector_base<char,std::allocator<char> >" = type { %"struct.std::_Vector_base<char,std::allocator<char> >::_Vector_impl" }
-	%"struct.std::_Vector_base<char,std::allocator<char> >::_Vector_impl" = type { i8*, i8*, i8* }
+	%"struct.std::_Vector_base<char,std::allocator<char> >::_Vector_impl" = type { ptr, ptr, ptr }
 	%"struct.std::_Vector_base<short unsigned int,std::allocator<short unsigned int> >" = type { %"struct.std::_Vector_base<short unsigned int,std::allocator<short unsigned int> >::_Vector_impl" }
-	%"struct.std::_Vector_base<short unsigned int,std::allocator<short unsigned int> >::_Vector_impl" = type { i16*, i16*, i16* }
-	%"struct.std::basic_ostream<char,std::char_traits<char> >.base" = type { i32 (...)** }
+	%"struct.std::_Vector_base<short unsigned int,std::allocator<short unsigned int> >::_Vector_impl" = type { ptr, ptr, ptr }
+	%"struct.std::basic_ostream<char,std::char_traits<char> >.base" = type { ptr }
 	%"struct.std::vector<bool,std::allocator<bool> >" = type { %"struct.std::_Bvector_base<std::allocator<bool> >" }
 	%"struct.std::vector<short unsigned int,std::allocator<short unsigned int> >" = type { %"struct.std::_Vector_base<short unsigned int,std::allocator<short unsigned int> >" }
-	%"struct.xalanc_1_8::FormatterListener" = type { %"struct.std::basic_ostream<char,std::char_traits<char> >.base", %"struct.std::basic_ostream<char,std::char_traits<char> >.base"*, i32 }
-	%"struct.xalanc_1_8::FormatterToXML" = type { %"struct.xalanc_1_8::FormatterListener", %"struct.std::basic_ostream<char,std::char_traits<char> >.base"*, %"struct.xalanc_1_8::XalanOutputStream"*, i16, [256 x i16], [256 x i16], i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", i32, i32, %"struct.std::vector<bool,std::allocator<bool> >", %"struct.xalanc_1_8::XalanDOMString", i8, i8, i8, i8, i8, %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", %"struct.std::vector<short unsigned int,std::allocator<short unsigned int> >", i32, %"struct.std::CharVectorType", %"struct.std::vector<bool,std::allocator<bool> >", %0, %1, %2, %3, %0, %1, %2, %3, %4, i16*, i32 }
+	%"struct.xalanc_1_8::FormatterListener" = type { %"struct.std::basic_ostream<char,std::char_traits<char> >.base", ptr, i32 }
+	%"struct.xalanc_1_8::FormatterToXML" = type { %"struct.xalanc_1_8::FormatterListener", ptr, ptr, i16, [256 x i16], [256 x i16], i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", i32, i32, %"struct.std::vector<bool,std::allocator<bool> >", %"struct.xalanc_1_8::XalanDOMString", i8, i8, i8, i8, i8, %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", %"struct.std::vector<short unsigned int,std::allocator<short unsigned int> >", i32, %"struct.std::CharVectorType", %"struct.std::vector<bool,std::allocator<bool> >", %0, %1, %2, %3, %0, %1, %2, %3, %4, ptr, i32 }
 	%"struct.xalanc_1_8::XalanDOMString" = type { %"struct.std::vector<short unsigned int,std::allocator<short unsigned int> >", i32 }
-	%"struct.xalanc_1_8::XalanOutputStream" = type { i32 (...)**, i32, %"struct.std::basic_ostream<char,std::char_traits<char> >.base"*, i32, %"struct.std::vector<short unsigned int,std::allocator<short unsigned int> >", %"struct.xalanc_1_8::XalanDOMString", i8, i8, %"struct.std::CharVectorType" }
+	%"struct.xalanc_1_8::XalanOutputStream" = type { ptr, i32, ptr, i32, %"struct.std::vector<short unsigned int,std::allocator<short unsigned int> >", %"struct.xalanc_1_8::XalanDOMString", i8, i8, %"struct.std::CharVectorType" }
 
-declare void @_ZN10xalanc_1_814FormatterToXML17writeParentTagEndEv(%"struct.xalanc_1_8::FormatterToXML"*)
+declare void @_ZN10xalanc_1_814FormatterToXML17writeParentTagEndEv(ptr)
 
-define void @_ZN10xalanc_1_814FormatterToXML5cdataEPKtj(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 %length) {
+define void @_ZN10xalanc_1_814FormatterToXML5cdataEPKtj(ptr %this, ptr %ch, i32 %length) {
 entry:
-	%0 = getelementptr %"struct.xalanc_1_8::FormatterToXML", %"struct.xalanc_1_8::FormatterToXML"* %this, i32 0, i32 13		; <i8*> [#uses=1]
+	%0 = getelementptr %"struct.xalanc_1_8::FormatterToXML", ptr %this, i32 0, i32 13		; <ptr> [#uses=1]
 	br i1 undef, label %bb4, label %bb
 
 bb:		; preds = %entry
-	store i8 0, i8* %0, align 1
-	%1 = getelementptr %"struct.xalanc_1_8::FormatterToXML", %"struct.xalanc_1_8::FormatterToXML"* %this, i32 0, i32 0, i32 0, i32 0		; <i32 (...)***> [#uses=1]
-	%2 = load i32 (...)**, i32 (...)*** %1, align 4		; <i32 (...)**> [#uses=1]
-	%3 = getelementptr i32 (...)*, i32 (...)** %2, i32 11		; <i32 (...)**> [#uses=1]
-	%4 = load i32 (...)*, i32 (...)** %3, align 4		; <i32 (...)*> [#uses=1]
-	%5 = bitcast i32 (...)* %4 to void (%"struct.xalanc_1_8::FormatterToXML"*, i16*, i32)*		; <void (%"struct.xalanc_1_8::FormatterToXML"*, i16*, i32)*> [#uses=1]
-	tail call  void %5(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 %length)
+	store i8 0, ptr %0, align 1
+	%1 = getelementptr %"struct.xalanc_1_8::FormatterToXML", ptr %this, i32 0, i32 0, i32 0, i32 0		; <ptr> [#uses=1]
+	%2 = load ptr, ptr %1, align 4		; <ptr> [#uses=1]
+	%3 = getelementptr ptr, ptr %2, i32 11		; <ptr> [#uses=1]
+	%4 = load ptr, ptr %3, align 4		; <ptr> [#uses=1]
+	tail call  void %4(ptr %this, ptr %ch, i32 %length)
 	ret void
 
 bb4:		; preds = %entry
-	tail call  void @_ZN10xalanc_1_814FormatterToXML17writeParentTagEndEv(%"struct.xalanc_1_8::FormatterToXML"* %this)
-	tail call  void undef(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 0, i32 %length, i8 zeroext undef)
+	tail call  void @_ZN10xalanc_1_814FormatterToXML17writeParentTagEndEv(ptr %this)
+	tail call  void undef(ptr %this, ptr %ch, i32 0, i32 %length, i8 zeroext undef)
 	ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerAssert.ll b/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerAssert.ll
index b75a14b0a6744..af3bd57c4752b 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerAssert.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerAssert.ll
@@ -1,9 +1,9 @@
 ; RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabi
 ; PR4681
 
-	%struct.FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct.FILE*, i32, i32, i32, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i32, i32, [40 x i8] }
-	%struct._IO_marker = type { %struct._IO_marker*, %struct.FILE*, i32 }
-@.str2 = external constant [30 x i8], align 1		; <[30 x i8]*> [#uses=1]
+	%struct.FILE = type { i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, i32, i16, i8, [1 x i8], ptr, i64, ptr, ptr, ptr, ptr, i32, i32, [40 x i8] }
+	%struct._IO_marker = type { ptr, ptr, i32 }
+@.str2 = external constant [30 x i8], align 1		; <ptr> [#uses=1]
 
 define i32 @__mf_heuristic_check(i32 %ptr, i32 %ptr_high) nounwind {
 entry:
@@ -16,14 +16,14 @@ bb1:		; preds = %entry
 	br i1 undef, label %bb9, label %bb2
 
 bb2:		; preds = %bb1
-	%0 = call i8* @llvm.frameaddress(i32 0)		; <i8*> [#uses=1]
-	%1 = call  i32 (%struct.FILE*, i8*, ...) @fprintf(%struct.FILE* noalias undef, i8* noalias getelementptr ([30 x i8], [30 x i8]* @.str2, i32 0, i32 0), i8* %0, i8* null) nounwind		; <i32> [#uses=0]
+	%0 = call ptr @llvm.frameaddress(i32 0)		; <ptr> [#uses=1]
+	%1 = call  i32 (ptr, ptr, ...) @fprintf(ptr noalias undef, ptr noalias @.str2, ptr %0, ptr null) nounwind		; <i32> [#uses=0]
 	unreachable
 
 bb9:		; preds = %bb1
 	ret i32 undef
 }
 
-declare i8* @llvm.frameaddress(i32) nounwind readnone
+declare ptr @llvm.frameaddress(i32) nounwind readnone
 
-declare i32 @fprintf(%struct.FILE* noalias nocapture, i8* noalias nocapture, ...) nounwind
+declare i32 @fprintf(ptr noalias nocapture, ptr noalias nocapture, ...) nounwind

diff --git a/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll b/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll
index 20da9ba595b7c..a5076f849b79b 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll
@@ -3,53 +3,53 @@
 	%0 = type { %struct.GAP }		; type %0
 	%1 = type { i16, i8, i8 }		; type %1
 	%2 = type { [2 x i32], [2 x i32] }		; type %2
-	%3 = type { %struct.rec* }		; type %3
+	%3 = type { ptr }		; type %3
 	%4 = type { i8, i8, i16, i8, i8, i8, i8 }		; type %4
-	%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
+	%struct.FILE = type { ptr, i32, i32, i16, i16, %struct.__sbuf, i32, ptr, ptr, ptr, ptr, ptr, %struct.__sbuf, ptr, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
 	%struct.FILE_POS = type { i8, i8, i16, i32 }
 	%struct.FIRST_UNION = type { %struct.FILE_POS }
 	%struct.FOURTH_UNION = type { %struct.STYLE }
 	%struct.GAP = type { i8, i8, i16 }
-	%struct.LIST = type { %struct.rec*, %struct.rec* }
+	%struct.LIST = type { ptr, ptr }
 	%struct.SECOND_UNION = type { %1 }
 	%struct.STYLE = type { %0, %0, i16, i16, i32 }
 	%struct.THIRD_UNION = type { %2 }
 	%struct.__sFILEX = type opaque
-	%struct.__sbuf = type { i8*, i32 }
-	%struct.head_type = type { [2 x %struct.LIST], %struct.FIRST_UNION, %struct.SECOND_UNION, %struct.THIRD_UNION, %struct.FOURTH_UNION, %struct.rec*, %3, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, i32 }
+	%struct.__sbuf = type { ptr, i32 }
+	%struct.head_type = type { [2 x %struct.LIST], %struct.FIRST_UNION, %struct.SECOND_UNION, %struct.THIRD_UNION, %struct.FOURTH_UNION, ptr, %3, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32 }
 	%struct.rec = type { %struct.head_type }
-@.str24239 = external constant [20 x i8], align 1		; <[20 x i8]*> [#uses=1]
-@no_file_pos = external global %4		; <%4*> [#uses=1]
-@zz_tmp = external global %struct.rec*		; <%struct.rec**> [#uses=1]
-@.str81872 = external constant [10 x i8], align 1		; <[10 x i8]*> [#uses=1]
-@out_fp = external global %struct.FILE*		; <%struct.FILE**> [#uses=2]
-@cpexists = external global i32		; <i32*> [#uses=2]
-@.str212784 = external constant [17 x i8], align 1		; <[17 x i8]*> [#uses=1]
-@.str1822946 = external constant [8 x i8], align 1		; <[8 x i8]*> [#uses=1]
-@.str1842948 = external constant [11 x i8], align 1		; <[11 x i8]*> [#uses=1]
+@.str24239 = external constant [20 x i8], align 1		; <ptr> [#uses=1]
+@no_file_pos = external global %4		; <ptr> [#uses=1]
+@zz_tmp = external global ptr		; <ptr> [#uses=1]
+@.str81872 = external constant [10 x i8], align 1		; <ptr> [#uses=1]
+@out_fp = external global ptr		; <ptr> [#uses=2]
+@cpexists = external global i32		; <ptr> [#uses=2]
+@.str212784 = external constant [17 x i8], align 1		; <ptr> [#uses=1]
+@.str1822946 = external constant [8 x i8], align 1		; <ptr> [#uses=1]
+@.str1842948 = external constant [11 x i8], align 1		; <ptr> [#uses=1]
 
-declare i32 @fprintf(%struct.FILE* nocapture, i8* nocapture, ...) nounwind
+declare i32 @fprintf(ptr nocapture, ptr nocapture, ...) nounwind
 
-declare i32 @"\01_fwrite"(i8*, i32, i32, i8*)
+declare i32 @"\01_fwrite"(ptr, i32, i32, ptr)
 
-declare %struct.FILE* @OpenIncGraphicFile(i8*, i8 zeroext, %struct.rec** nocapture, %struct.FILE_POS*, i32* nocapture) nounwind
+declare ptr @OpenIncGraphicFile(ptr, i8 zeroext, ptr nocapture, ptr, ptr nocapture) nounwind
 
-declare void @Error(i32, i32, i8*, i32, %struct.FILE_POS*, ...) nounwind
+declare void @Error(i32, i32, ptr, i32, ptr, ...) nounwind
 
-declare i8* @fgets(i8*, i32, %struct.FILE* nocapture) nounwind
+declare ptr @fgets(ptr, i32, ptr nocapture) nounwind
 
-define void @PS_PrintGraphicInclude(%struct.rec* %x, i32 %colmark, i32 %rowmark) nounwind {
+define void @PS_PrintGraphicInclude(ptr %x, i32 %colmark, i32 %rowmark) nounwind {
 entry:
 	br label %bb5
 
 bb5:		; preds = %bb5, %entry
-	%.pn = phi %struct.rec* [ %y.0, %bb5 ], [ undef, %entry ]		; <%struct.rec*> [#uses=1]
-	%y.0.in = getelementptr %struct.rec, %struct.rec* %.pn, i32 0, i32 0, i32 0, i32 1, i32 0		; <%struct.rec**> [#uses=1]
-	%y.0 = load %struct.rec*, %struct.rec** %y.0.in		; <%struct.rec*> [#uses=2]
+	%.pn = phi ptr [ %y.0, %bb5 ], [ undef, %entry ]		; <ptr> [#uses=1]
+	%y.0.in = getelementptr %struct.rec, ptr %.pn, i32 0, i32 0, i32 0, i32 1, i32 0		; <ptr> [#uses=1]
+	%y.0 = load ptr, ptr %y.0.in		; <ptr> [#uses=2]
 	br i1 undef, label %bb5, label %bb6
 
 bb6:		; preds = %bb5
-	%0 = call  %struct.FILE* @OpenIncGraphicFile(i8* undef, i8 zeroext 0, %struct.rec** undef, %struct.FILE_POS* null, i32* undef) nounwind		; <%struct.FILE*> [#uses=1]
+	%0 = call  ptr @OpenIncGraphicFile(ptr undef, i8 zeroext 0, ptr undef, ptr null, ptr undef) nounwind		; <ptr> [#uses=1]
 	br i1 false, label %bb.i, label %FontHalfXHeight.exit
 
 bb.i:		; preds = %bb6
@@ -62,33 +62,33 @@ bb.i1:		; preds = %FontHalfXHeight.exit
 	br label %FontSize.exit
 
 FontSize.exit:		; preds = %bb.i1, %FontHalfXHeight.exit
-	%1 = load i32, i32* undef, align 4		; <i32> [#uses=1]
+	%1 = load i32, ptr undef, align 4		; <i32> [#uses=1]
 	%2 = icmp ult i32 0, undef		; <i1> [#uses=1]
 	br i1 %2, label %bb.i5, label %FontName.exit
 
 bb.i5:		; preds = %FontSize.exit
-	call  void (i32, i32, i8*, i32, %struct.FILE_POS*, ...) @Error(i32 1, i32 2, i8* getelementptr ([20 x i8], [20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([10 x i8], [10 x i8]* @.str81872, i32 0, i32 0)) nounwind
+	call  void (i32, i32, ptr, i32, ptr, ...) @Error(i32 1, i32 2, ptr @.str24239, i32 0, ptr @no_file_pos, ptr @.str81872) nounwind
 	br label %FontName.exit
 
 FontName.exit:		; preds = %bb.i5, %FontSize.exit
-	%3 = call  i32 (%struct.FILE*, i8*, ...) @fprintf(%struct.FILE* undef, i8* getelementptr ([8 x i8], [8 x i8]* @.str1822946, i32 0, i32 0), i32 %1, i8* undef) nounwind		; <i32> [#uses=0]
-	%4 = call  i32 @"\01_fwrite"(i8* getelementptr ([11 x i8], [11 x i8]* @.str1842948, i32 0, i32 0), i32 1, i32 10, i8* undef) nounwind		; <i32> [#uses=0]
+	%3 = call  i32 (ptr, ptr, ...) @fprintf(ptr undef, ptr @.str1822946, i32 %1, ptr undef) nounwind		; <i32> [#uses=0]
+	%4 = call  i32 @"\01_fwrite"(ptr @.str1842948, i32 1, i32 10, ptr undef) nounwind		; <i32> [#uses=0]
 	%5 = sub i32 %colmark, undef		; <i32> [#uses=1]
 	%6 = sub i32 %rowmark, undef		; <i32> [#uses=1]
-	%7 = load %struct.FILE*, %struct.FILE** @out_fp, align 4		; <%struct.FILE*> [#uses=1]
-	%8 = call  i32 (%struct.FILE*, i8*, ...) @fprintf(%struct.FILE* %7, i8* getelementptr ([17 x i8], [17 x i8]* @.str212784, i32 0, i32 0), i32 %5, i32 %6) nounwind		; <i32> [#uses=0]
-	store i32 0, i32* @cpexists, align 4
-	%9 = getelementptr %struct.rec, %struct.rec* %y.0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 1		; <i32*> [#uses=1]
-	%10 = load i32, i32* %9, align 4		; <i32> [#uses=1]
+	%7 = load ptr, ptr @out_fp, align 4		; <ptr> [#uses=1]
+	%8 = call  i32 (ptr, ptr, ...) @fprintf(ptr %7, ptr @.str212784, i32 %5, i32 %6) nounwind		; <i32> [#uses=0]
+	store i32 0, ptr @cpexists, align 4
+	%9 = getelementptr %struct.rec, ptr %y.0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 1		; <ptr> [#uses=1]
+	%10 = load i32, ptr %9, align 4		; <i32> [#uses=1]
 	%11 = sub i32 0, %10		; <i32> [#uses=1]
-	%12 = load %struct.FILE*, %struct.FILE** @out_fp, align 4		; <%struct.FILE*> [#uses=1]
-	%13 = call  i32 (%struct.FILE*, i8*, ...) @fprintf(%struct.FILE* %12, i8* getelementptr ([17 x i8], [17 x i8]* @.str212784, i32 0, i32 0), i32 undef, i32 %11) nounwind		; <i32> [#uses=0]
-	store i32 0, i32* @cpexists, align 4
+	%12 = load ptr, ptr @out_fp, align 4		; <ptr> [#uses=1]
+	%13 = call  i32 (ptr, ptr, ...) @fprintf(ptr %12, ptr @.str212784, i32 undef, i32 %11) nounwind		; <i32> [#uses=0]
+	store i32 0, ptr @cpexists, align 4
 	br label %bb100.outer.outer
 
 bb100.outer.outer:		; preds = %bb79.critedge, %bb1.i3, %FontName.exit
-	%x_addr.0.ph.ph = phi %struct.rec* [ %x, %FontName.exit ], [ null, %bb79.critedge ], [ null, %bb1.i3 ]		; <%struct.rec*> [#uses=1]
-	%14 = getelementptr %struct.rec, %struct.rec* %x_addr.0.ph.ph, i32 0, i32 0, i32 1, i32 0		; <%struct.FILE_POS*> [#uses=0]
+	%x_addr.0.ph.ph = phi ptr [ %x, %FontName.exit ], [ null, %bb79.critedge ], [ null, %bb1.i3 ]		; <ptr> [#uses=1]
+	%14 = getelementptr %struct.rec, ptr %x_addr.0.ph.ph, i32 0, i32 0, i32 1, i32 0		; <ptr> [#uses=0]
 	br label %bb100.outer
 
 bb.i80:		; preds = %bb3.i85
@@ -116,7 +116,7 @@ bb.i47:		; preds = %bb3.i52
 	br i1 undef, label %bb2.i51, label %bb2.i.i15.critedge
 
 bb2.i51:		; preds = %bb.i47, %StringBeginsWith.exit88, %bb.i80
-	%15 = load i8, i8* undef, align 1		; <i8> [#uses=0]
+	%15 = load i8, ptr undef, align 1		; <i8> [#uses=0]
 	br i1 false, label %StringBeginsWith.exit55thread-split, label %bb3.i52
 
 bb3.i52:		; preds = %bb2.i51
@@ -132,7 +132,7 @@ bb2.i41:		; preds = %bb2.i41, %StringBeginsWith.exit55
 	br label %bb2.i41
 
 bb2.i.i15.critedge:		; preds = %bb.i47
-	%16 = call  i8* @fgets(i8* undef, i32 512, %struct.FILE* %0) nounwind		; <i8*> [#uses=0]
+	%16 = call  ptr @fgets(ptr undef, i32 512, ptr %0) nounwind		; <ptr> [#uses=0]
 	%iftmp.560.0 = select i1 undef, i32 2, i32 0		; <i32> [#uses=1]
 	br label %bb100.outer
 
@@ -143,7 +143,7 @@ bb1.i3:		; preds = %bb2.i8
 	br label %bb100.outer.outer
 
 bb79.critedge:		; preds = %bb2.i8
-	store %struct.rec* null, %struct.rec** @zz_tmp, align 4
+	store ptr null, ptr @zz_tmp, align 4
 	br label %bb100.outer.outer
 
 bb100.outer:		; preds = %bb2.i.i15.critedge, %bb100.outer.outer

diff  --git a/llvm/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll b/llvm/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll
index 5656d78423949..927c3409c8f0b 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll
@@ -3,81 +3,81 @@
 	%0 = type { i16, i8, i8 }		; type %0
 	%1 = type { [2 x i32], [2 x i32] }		; type %1
 	%2 = type { %struct.GAP }		; type %2
-	%3 = type { %struct.rec* }		; type %3
+	%3 = type { ptr }		; type %3
 	%4 = type { i8, i8, i16, i8, i8, i8, i8 }		; type %4
 	%5 = type { i8, i8, i8, i8 }		; type %5
 	%struct.COMPOSITE = type { i8, i16, i16 }
-	%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
+	%struct.FILE = type { ptr, i32, i32, i16, i16, %struct.__sbuf, i32, ptr, ptr, ptr, ptr, ptr, %struct.__sbuf, ptr, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
 	%struct.FILE_POS = type { i8, i8, i16, i32 }
 	%struct.FIRST_UNION = type { %struct.FILE_POS }
-	%struct.FONT_INFO = type { %struct.metrics*, i8*, i16*, %struct.COMPOSITE*, i32, %struct.rec*, %struct.rec*, i16, i16, i16*, i8*, i8*, i16* }
+	%struct.FONT_INFO = type { ptr, ptr, ptr, ptr, i32, ptr, ptr, i16, i16, ptr, ptr, ptr, ptr }
 	%struct.FOURTH_UNION = type { %struct.STYLE }
 	%struct.GAP = type { i8, i8, i16 }
-	%struct.LIST = type { %struct.rec*, %struct.rec* }
+	%struct.LIST = type { ptr, ptr }
 	%struct.SECOND_UNION = type { %0 }
 	%struct.STYLE = type { %2, %2, i16, i16, i32 }
 	%struct.THIRD_UNION = type { %1 }
 	%struct.__sFILEX = type opaque
-	%struct.__sbuf = type { i8*, i32 }
-	%struct.head_type = type { [2 x %struct.LIST], %struct.FIRST_UNION, %struct.SECOND_UNION, %struct.THIRD_UNION, %struct.FOURTH_UNION, %struct.rec*, %3, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, i32 }
+	%struct.__sbuf = type { ptr, i32 }
+	%struct.head_type = type { [2 x %struct.LIST], %struct.FIRST_UNION, %struct.SECOND_UNION, %struct.THIRD_UNION, %struct.FOURTH_UNION, ptr, %3, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32 }
 	%struct.metrics = type { i16, i16, i16, i16, i16 }
 	%struct.rec = type { %struct.head_type }
-@.str24239 = external constant [20 x i8], align 1		; <[20 x i8]*> [#uses=1]
-@no_file_pos = external global %4		; <%4*> [#uses=1]
-@.str19294 = external constant [9 x i8], align 1		; <[9 x i8]*> [#uses=1]
-@zz_lengths = external global [150 x i8]		; <[150 x i8]*> [#uses=1]
-@next_free.4772 = external global i8**		; <i8***> [#uses=3]
-@top_free.4773 = external global i8**		; <i8***> [#uses=2]
-@.str1575 = external constant [32 x i8], align 1		; <[32 x i8]*> [#uses=1]
-@zz_free = external global [524 x %struct.rec*]		; <[524 x %struct.rec*]*> [#uses=2]
-@zz_hold = external global %struct.rec*		; <%struct.rec**> [#uses=5]
-@zz_tmp = external global %struct.rec*		; <%struct.rec**> [#uses=2]
-@zz_res = external global %struct.rec*		; <%struct.rec**> [#uses=2]
-@xx_link = external global %struct.rec*		; <%struct.rec**> [#uses=2]
-@font_count = external global i32		; <i32*> [#uses=1]
-@.str81872 = external constant [10 x i8], align 1		; <[10 x i8]*> [#uses=1]
-@.str101874 = external constant [30 x i8], align 1		; <[30 x i8]*> [#uses=1]
-@.str111875 = external constant [17 x i8], align 1		; <[17 x i8]*> [#uses=1]
-@.str141878 = external constant [27 x i8], align 1		; <[27 x i8]*> [#uses=1]
-@out_fp = external global %struct.FILE*		; <%struct.FILE**> [#uses=3]
-@.str192782 = external constant [17 x i8], align 1		; <[17 x i8]*> [#uses=1]
-@cpexists = external global i32		; <i32*> [#uses=2]
-@.str212784 = external constant [17 x i8], align 1		; <[17 x i8]*> [#uses=1]
-@currentfont = external global i32		; <i32*> [#uses=3]
-@wordcount = external global i32		; <i32*> [#uses=1]
-@needs = external global %struct.rec*		; <%struct.rec**> [#uses=1]
-@.str742838 = external constant [6 x i8], align 1		; <[6 x i8]*> [#uses=1]
-@.str752839 = external constant [10 x i8], align 1		; <[10 x i8]*> [#uses=1]
-@.str1802944 = external constant [40 x i8], align 1		; <[40 x i8]*> [#uses=1]
-@.str1822946 = external constant [8 x i8], align 1		; <[8 x i8]*> [#uses=1]
-@.str1842948 = external constant [11 x i8], align 1		; <[11 x i8]*> [#uses=1]
-@.str1852949 = external constant [23 x i8], align 1		; <[23 x i8]*> [#uses=1]
-@.str1872951 = external constant [17 x i8], align 1		; <[17 x i8]*> [#uses=1]
-@.str1932957 = external constant [26 x i8], align 1		; <[26 x i8]*> [#uses=1]
-
-declare i32 @fprintf(%struct.FILE* nocapture, i8* nocapture, ...) nounwind
-
-declare i32 @"\01_fwrite"(i8*, i32, i32, i8*)
-
-declare i32 @remove(i8* nocapture) nounwind
-
-declare %struct.FILE* @OpenIncGraphicFile(i8*, i8 zeroext, %struct.rec** nocapture, %struct.FILE_POS*, i32* nocapture) nounwind
-
-declare %struct.rec* @MakeWord(i32, i8* nocapture, %struct.FILE_POS*) nounwind
-
-declare void @Error(i32, i32, i8*, i32, %struct.FILE_POS*, ...) nounwind
-
-declare i32 @"\01_fputs"(i8*, %struct.FILE*)
-
-declare noalias i8* @calloc(i32, i32) nounwind
-
-declare i8* @fgets(i8*, i32, %struct.FILE* nocapture) nounwind
-
-define void @PS_PrintGraphicInclude(%struct.rec* %x, i32 %colmark, i32 %rowmark) nounwind {
+@.str24239 = external constant [20 x i8], align 1		; <ptr> [#uses=1]
+@no_file_pos = external global %4		; <ptr> [#uses=1]
+@.str19294 = external constant [9 x i8], align 1		; <ptr> [#uses=1]
+@zz_lengths = external global [150 x i8]		; <ptr> [#uses=1]
+@next_free.4772 = external global ptr		; <ptr> [#uses=3]
+@top_free.4773 = external global ptr		; <ptr> [#uses=2]
+@.str1575 = external constant [32 x i8], align 1		; <ptr> [#uses=1]
+@zz_free = external global [524 x ptr]		; <ptr> [#uses=2]
+@zz_hold = external global ptr		; <ptr> [#uses=5]
+@zz_tmp = external global ptr		; <ptr> [#uses=2]
+@zz_res = external global ptr		; <ptr> [#uses=2]
+@xx_link = external global ptr		; <ptr> [#uses=2]
+@font_count = external global i32		; <ptr> [#uses=1]
+@.str81872 = external constant [10 x i8], align 1		; <ptr> [#uses=1]
+@.str101874 = external constant [30 x i8], align 1		; <ptr> [#uses=1]
+@.str111875 = external constant [17 x i8], align 1		; <ptr> [#uses=1]
+@.str141878 = external constant [27 x i8], align 1		; <ptr> [#uses=1]
+@out_fp = external global ptr		; <ptr> [#uses=3]
+@.str192782 = external constant [17 x i8], align 1		; <ptr> [#uses=1]
+@cpexists = external global i32		; <ptr> [#uses=2]
+@.str212784 = external constant [17 x i8], align 1		; <ptr> [#uses=1]
+@currentfont = external global i32		; <ptr> [#uses=3]
+@wordcount = external global i32		; <ptr> [#uses=1]
+@needs = external global ptr		; <ptr> [#uses=1]
+@.str742838 = external constant [6 x i8], align 1		; <ptr> [#uses=1]
+@.str752839 = external constant [10 x i8], align 1		; <ptr> [#uses=1]
+@.str1802944 = external constant [40 x i8], align 1		; <ptr> [#uses=1]
+@.str1822946 = external constant [8 x i8], align 1		; <ptr> [#uses=1]
+@.str1842948 = external constant [11 x i8], align 1		; <ptr> [#uses=1]
+@.str1852949 = external constant [23 x i8], align 1		; <ptr> [#uses=1]
+@.str1872951 = external constant [17 x i8], align 1		; <ptr> [#uses=1]
+@.str1932957 = external constant [26 x i8], align 1		; <ptr> [#uses=1]
+
+declare i32 @fprintf(ptr nocapture, ptr nocapture, ...) nounwind
+
+declare i32 @"\01_fwrite"(ptr, i32, i32, ptr)
+
+declare i32 @remove(ptr nocapture) nounwind
+
+declare ptr @OpenIncGraphicFile(ptr, i8 zeroext, ptr nocapture, ptr, ptr nocapture) nounwind
+
+declare ptr @MakeWord(i32, ptr nocapture, ptr) nounwind
+
+declare void @Error(i32, i32, ptr, i32, ptr, ...) nounwind
+
+declare i32 @"\01_fputs"(ptr, ptr)
+
+declare noalias ptr @calloc(i32, i32) nounwind
+
+declare ptr @fgets(ptr, i32, ptr nocapture) nounwind
+
+define void @PS_PrintGraphicInclude(ptr %x, i32 %colmark, i32 %rowmark) nounwind {
 entry:
-	%buff = alloca [512 x i8], align 4		; <[512 x i8]*> [#uses=5]
-	%0 = getelementptr %struct.rec, %struct.rec* %x, i32 0, i32 0, i32 1, i32 0, i32 0		; <i8*> [#uses=2]
-	%1 = load i8, i8* %0, align 4		; <i8> [#uses=1]
+	%buff = alloca [512 x i8], align 4		; <ptr> [#uses=5]
+	%0 = getelementptr %struct.rec, ptr %x, i32 0, i32 0, i32 1, i32 0, i32 0		; <ptr> [#uses=2]
+	%1 = load i8, ptr %0, align 4		; <i8> [#uses=1]
 	%2 = add i8 %1, -94		; <i8> [#uses=1]
 	%3 = icmp ugt i8 %2, 1		; <i1> [#uses=1]
 	br i1 %3, label %bb, label %bb1
@@ -86,50 +86,49 @@ bb:		; preds = %entry
 	br label %bb1
 
 bb1:		; preds = %bb, %entry
-	%4 = getelementptr %struct.rec, %struct.rec* %x, i32 0, i32 0, i32 2		; <%struct.SECOND_UNION*> [#uses=1]
-	%5 = bitcast %struct.SECOND_UNION* %4 to %5*		; <%5*> [#uses=1]
-	%6 = getelementptr %5, %5* %5, i32 0, i32 1		; <i8*> [#uses=1]
-	%7 = load i8, i8* %6, align 1		; <i8> [#uses=1]
-	%8 = icmp eq i8 %7, 0		; <i1> [#uses=1]
-	br i1 %8, label %bb2, label %bb3
+	%4 = getelementptr %struct.rec, ptr %x, i32 0, i32 0, i32 2		; <ptr> [#uses=1]
+	%5 = getelementptr %4, ptr %4, i32 0, i32 1		; <ptr> [#uses=1]
+	%6 = load i8, ptr %5, align 1		; <i8> [#uses=1]
+	%7 = icmp eq i8 %6, 0		; <i1> [#uses=1]
+	br i1 %7, label %bb2, label %bb3
 
 bb2:		; preds = %bb1
-	call  void (i32, i32, i8*, i32, %struct.FILE_POS*, ...) @Error(i32 1, i32 2, i8* getelementptr ([20 x i8], [20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([40 x i8], [40 x i8]* @.str1802944, i32 0, i32 0)) nounwind
+	call  void (i32, i32, ptr, i32, ptr, ...) @Error(i32 1, i32 2, ptr @.str24239, i32 0, ptr @no_file_pos, ptr @.str1802944) nounwind
 	br label %bb3
 
 bb3:		; preds = %bb2, %bb1
-	%9 = load %struct.rec*, %struct.rec** undef, align 4		; <%struct.rec*> [#uses=0]
+	%8 = load ptr, ptr undef, align 4		; <ptr> [#uses=0]
 	br label %bb5
 
 bb5:		; preds = %bb5, %bb3
-	%y.0 = load %struct.rec*, %struct.rec** null		; <%struct.rec*> [#uses=2]
+	%y.0 = load ptr, ptr null		; <ptr> [#uses=2]
 	br i1 false, label %bb5, label %bb6
 
 bb6:		; preds = %bb5
-	%10 = load i8, i8* %0, align 4		; <i8> [#uses=1]
-	%11 = getelementptr %struct.rec, %struct.rec* %y.0, i32 0, i32 0, i32 1, i32 0		; <%struct.FILE_POS*> [#uses=1]
-	%12 = call  %struct.FILE* @OpenIncGraphicFile(i8* undef, i8 zeroext %10, %struct.rec** null, %struct.FILE_POS* %11, i32* undef) nounwind		; <%struct.FILE*> [#uses=4]
+	%9 = load i8, ptr %0, align 4		; <i8> [#uses=1]
+	%10 = getelementptr %struct.rec, ptr %y.0, i32 0, i32 0, i32 1, i32 0		; <ptr> [#uses=1]
+	%11 = call  ptr @OpenIncGraphicFile(ptr undef, i8 zeroext %9, ptr null, ptr %10, ptr undef) nounwind		; <ptr> [#uses=4]
 	br i1 false, label %bb7, label %bb8
 
 bb7:		; preds = %bb6
 	unreachable
 
 bb8:		; preds = %bb6
-	%13 = and i32 undef, 4095		; <i32> [#uses=2]
-	%14 = load i32, i32* @currentfont, align 4		; <i32> [#uses=0]
+	%12 = and i32 undef, 4095		; <i32> [#uses=2]
+	%13 = load i32, ptr @currentfont, align 4		; <i32> [#uses=0]
 	br i1 false, label %bb10, label %bb9
 
 bb9:		; preds = %bb8
-	%15 = icmp ult i32 0, %13		; <i1> [#uses=1]
-	br i1 %15, label %bb.i, label %FontHalfXHeight.exit
+	%14 = icmp ult i32 0, %12		; <i1> [#uses=1]
+	br i1 %14, label %bb.i, label %FontHalfXHeight.exit
 
 bb.i:		; preds = %bb9
-	call  void (i32, i32, i8*, i32, %struct.FILE_POS*, ...) @Error(i32 1, i32 2, i8* getelementptr ([20 x i8], [20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([17 x i8], [17 x i8]* @.str111875, i32 0, i32 0)) nounwind
-	%.pre186 = load i32, i32* @currentfont, align 4		; <i32> [#uses=1]
+	call  void (i32, i32, ptr, i32, ptr, ...) @Error(i32 1, i32 2, ptr @.str24239, i32 0, ptr @no_file_pos, ptr @.str111875) nounwind
+	%.pre186 = load i32, ptr @currentfont, align 4		; <i32> [#uses=1]
 	br label %FontHalfXHeight.exit
 
 FontHalfXHeight.exit:		; preds = %bb.i, %bb9
-	%16 = phi i32 [ %.pre186, %bb.i ], [ %13, %bb9 ]		; <i32> [#uses=1]
+	%15 = phi i32 [ %.pre186, %bb.i ], [ %12, %bb9 ]		; <i32> [#uses=1]
 	br i1 false, label %bb.i1, label %bb1.i
 
 bb.i1:		; preds = %FontHalfXHeight.exit
@@ -139,82 +138,82 @@ bb1.i:		; preds = %bb.i1, %FontHalfXHeight.exit
 	br i1 undef, label %bb2.i, label %FontSize.exit
 
 bb2.i:		; preds = %bb1.i
-	call  void (i32, i32, i8*, i32, %struct.FILE_POS*, ...) @Error(i32 37, i32 61, i8* getelementptr ([30 x i8], [30 x i8]* @.str101874, i32 0, i32 0), i32 1, %struct.FILE_POS* null) nounwind
+	call  void (i32, i32, ptr, i32, ptr, ...) @Error(i32 37, i32 61, ptr @.str101874, i32 1, ptr null) nounwind
 	unreachable
 
 FontSize.exit:		; preds = %bb1.i
-	%17 = getelementptr %struct.FONT_INFO, %struct.FONT_INFO* undef, i32 %16, i32 5		; <%struct.rec**> [#uses=0]
-	%18 = load i32, i32* undef, align 4		; <i32> [#uses=1]
-	%19 = load i32, i32* @currentfont, align 4		; <i32> [#uses=2]
-	%20 = load i32, i32* @font_count, align 4		; <i32> [#uses=1]
-	%21 = icmp ult i32 %20, %19		; <i1> [#uses=1]
-	br i1 %21, label %bb.i5, label %FontName.exit
+	%16 = getelementptr %struct.FONT_INFO, ptr undef, i32 %15, i32 5		; <ptr> [#uses=0]
+	%17 = load i32, ptr undef, align 4		; <i32> [#uses=1]
+	%18 = load i32, ptr @currentfont, align 4		; <i32> [#uses=2]
+	%19 = load i32, ptr @font_count, align 4		; <i32> [#uses=1]
+	%20 = icmp ult i32 %19, %18		; <i1> [#uses=1]
+	br i1 %20, label %bb.i5, label %FontName.exit
 
 bb.i5:		; preds = %FontSize.exit
-	call  void (i32, i32, i8*, i32, %struct.FILE_POS*, ...) @Error(i32 1, i32 2, i8* getelementptr ([20 x i8], [20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([10 x i8], [10 x i8]* @.str81872, i32 0, i32 0)) nounwind
+	call  void (i32, i32, ptr, i32, ptr, ...) @Error(i32 1, i32 2, ptr @.str24239, i32 0, ptr @no_file_pos, ptr @.str81872) nounwind
 	br label %FontName.exit
 
 FontName.exit:		; preds = %bb.i5, %FontSize.exit
-	%22 = phi %struct.FONT_INFO* [ undef, %bb.i5 ], [ undef, %FontSize.exit ]		; <%struct.FONT_INFO*> [#uses=1]
-	%23 = getelementptr %struct.FONT_INFO, %struct.FONT_INFO* %22, i32 %19, i32 5		; <%struct.rec**> [#uses=0]
-	%24 = call  i32 (%struct.FILE*, i8*, ...) @fprintf(%struct.FILE* undef, i8* getelementptr ([8 x i8], [8 x i8]* @.str1822946, i32 0, i32 0), i32 %18, i8* null) nounwind		; <i32> [#uses=0]
+	%21 = phi ptr [ undef, %bb.i5 ], [ undef, %FontSize.exit ]		; <ptr> [#uses=1]
+	%22 = getelementptr %struct.FONT_INFO, ptr %21, i32 %18, i32 5		; <ptr> [#uses=0]
+	%23 = call  i32 (ptr, ptr, ...) @fprintf(ptr undef, ptr @.str1822946, i32 %17, ptr null) nounwind		; <i32> [#uses=0]
 	br label %bb10
 
 bb10:		; preds = %FontName.exit, %bb8
-	%25 = call  i32 @"\01_fwrite"(i8* getelementptr ([11 x i8], [11 x i8]* @.str1842948, i32 0, i32 0), i32 1, i32 10, i8* undef) nounwind		; <i32> [#uses=0]
-	%26 = sub i32 %rowmark, undef		; <i32> [#uses=1]
-	%27 = load %struct.FILE*, %struct.FILE** @out_fp, align 4		; <%struct.FILE*> [#uses=1]
-	%28 = call  i32 (%struct.FILE*, i8*, ...) @fprintf(%struct.FILE* %27, i8* getelementptr ([17 x i8], [17 x i8]* @.str212784, i32 0, i32 0), i32 undef, i32 %26) nounwind		; <i32> [#uses=0]
-	store i32 0, i32* @cpexists, align 4
-	%29 = call  i32 (%struct.FILE*, i8*, ...) @fprintf(%struct.FILE* undef, i8* getelementptr ([17 x i8], [17 x i8]* @.str192782, i32 0, i32 0), double 2.000000e+01, double 2.000000e+01) nounwind		; <i32> [#uses=0]
-	%30 = getelementptr %struct.rec, %struct.rec* %y.0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 0		; <i32*> [#uses=1]
-	%31 = load i32, i32* %30, align 4		; <i32> [#uses=1]
-	%32 = sub i32 0, %31		; <i32> [#uses=1]
-	%33 = load i32, i32* undef, align 4		; <i32> [#uses=1]
-	%34 = sub i32 0, %33		; <i32> [#uses=1]
-	%35 = load %struct.FILE*, %struct.FILE** @out_fp, align 4		; <%struct.FILE*> [#uses=1]
-	%36 = call  i32 (%struct.FILE*, i8*, ...) @fprintf(%struct.FILE* %35, i8* getelementptr ([17 x i8], [17 x i8]* @.str212784, i32 0, i32 0), i32 %32, i32 %34) nounwind		; <i32> [#uses=0]
-	store i32 0, i32* @cpexists, align 4
-	%37 = load %struct.rec*, %struct.rec** null, align 4		; <%struct.rec*> [#uses=1]
-	%38 = getelementptr %struct.rec, %struct.rec* %37, i32 0, i32 0, i32 4		; <%struct.FOURTH_UNION*> [#uses=1]
-	%39 = call  i32 (%struct.FILE*, i8*, ...) @fprintf(%struct.FILE* undef, i8* getelementptr ([23 x i8], [23 x i8]* @.str1852949, i32 0, i32 0), %struct.FOURTH_UNION* %38) nounwind		; <i32> [#uses=0]
-	%buff14 = getelementptr [512 x i8], [512 x i8]* %buff, i32 0, i32 0		; <i8*> [#uses=5]
-	%40 = call  i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind		; <i8*> [#uses=0]
+	%24 = call  i32 @"\01_fwrite"(ptr @.str1842948, i32 1, i32 10, ptr undef) nounwind		; <i32> [#uses=0]
+	%25 = sub i32 %rowmark, undef		; <i32> [#uses=1]
+	%26 = load ptr, ptr @out_fp, align 4		; <ptr> [#uses=1]
+	%27 = call  i32 (ptr, ptr, ...) @fprintf(ptr %26, ptr @.str212784, i32 undef, i32 %25) nounwind		; <i32> [#uses=0]
+	store i32 0, ptr @cpexists, align 4
+	%28 = call  i32 (ptr, ptr, ...) @fprintf(ptr undef, ptr @.str192782, double 2.000000e+01, double 2.000000e+01) nounwind		; <i32> [#uses=0]
+	%29 = getelementptr %struct.rec, ptr %y.0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 0		; <ptr> [#uses=1]
+	%30 = load i32, ptr %29, align 4		; <i32> [#uses=1]
+	%31 = sub i32 0, %30		; <i32> [#uses=1]
+	%32 = load i32, ptr undef, align 4		; <i32> [#uses=1]
+	%33 = sub i32 0, %32		; <i32> [#uses=1]
+	%34 = load ptr, ptr @out_fp, align 4		; <ptr> [#uses=1]
+	%35 = call  i32 (ptr, ptr, ...) @fprintf(ptr %34, ptr @.str212784, i32 %31, i32 %33) nounwind		; <i32> [#uses=0]
+	store i32 0, ptr @cpexists, align 4
+	%36 = load ptr, ptr null, align 4		; <ptr> [#uses=1]
+	%37 = getelementptr %struct.rec, ptr %36, i32 0, i32 0, i32 4		; <ptr> [#uses=1]
+	%38 = call  i32 (ptr, ptr, ...) @fprintf(ptr undef, ptr @.str1852949, ptr %37) nounwind		; <i32> [#uses=0]
+	%buff14 = getelementptr [512 x i8], ptr %buff, i32 0, i32 0		; <ptr> [#uses=5]
+	%39 = call  ptr @fgets(ptr %buff14, i32 512, ptr %11) nounwind		; <ptr> [#uses=0]
 	%iftmp.506.0 = select i1 undef, i32 2, i32 0		; <i32> [#uses=1]
-	%41 = getelementptr [512 x i8], [512 x i8]* %buff, i32 0, i32 26		; <i8*> [#uses=1]
+	%40 = getelementptr [512 x i8], ptr %buff, i32 0, i32 26		; <ptr> [#uses=1]
 	br label %bb100.outer.outer
 
 bb100.outer.outer:		; preds = %bb83, %bb10
 	%state.0.ph.ph = phi i32 [ %iftmp.506.0, %bb10 ], [ undef, %bb83 ]		; <i32> [#uses=1]
-	%x_addr.0.ph.ph = phi %struct.rec* [ %x, %bb10 ], [ %71, %bb83 ]		; <%struct.rec*> [#uses=1]
-	%42 = getelementptr %struct.rec, %struct.rec* %x_addr.0.ph.ph, i32 0, i32 0, i32 1, i32 0		; <%struct.FILE_POS*> [#uses=0]
+	%x_addr.0.ph.ph = phi ptr [ %x, %bb10 ], [ %70, %bb83 ]		; <ptr> [#uses=1]
+	%41 = getelementptr %struct.rec, ptr %x_addr.0.ph.ph, i32 0, i32 0, i32 1, i32 0		; <ptr> [#uses=0]
 	br label %bb100.outer
 
 bb.i80:		; preds = %bb3.i85
-	%43 = icmp eq i8 %44, %46		; <i1> [#uses=1]
+	%42 = icmp eq i8 %43, %45		; <i1> [#uses=1]
 	%indvar.next.i79 = add i32 %indvar.i81, 1		; <i32> [#uses=1]
-	br i1 %43, label %bb2.i84, label %bb2.i51
+	br i1 %42, label %bb2.i84, label %bb2.i51
 
 bb2.i84:		; preds = %bb100.outer, %bb.i80
 	%indvar.i81 = phi i32 [ %indvar.next.i79, %bb.i80 ], [ 0, %bb100.outer ]		; <i32> [#uses=3]
-	%pp.0.i82 = getelementptr [27 x i8], [27 x i8]* @.str141878, i32 0, i32 %indvar.i81		; <i8*> [#uses=2]
-	%sp.0.i83 = getelementptr [512 x i8], [512 x i8]* %buff, i32 0, i32 %indvar.i81		; <i8*> [#uses=1]
-	%44 = load i8, i8* %sp.0.i83, align 1		; <i8> [#uses=2]
-	%45 = icmp eq i8 %44, 0		; <i1> [#uses=1]
-	br i1 %45, label %StringBeginsWith.exit88thread-split, label %bb3.i85
+	%pp.0.i82 = getelementptr [27 x i8], ptr @.str141878, i32 0, i32 %indvar.i81		; <ptr> [#uses=2]
+	%sp.0.i83 = getelementptr [512 x i8], ptr %buff, i32 0, i32 %indvar.i81		; <ptr> [#uses=1]
+	%43 = load i8, ptr %sp.0.i83, align 1		; <i8> [#uses=2]
+	%44 = icmp eq i8 %43, 0		; <i1> [#uses=1]
+	br i1 %44, label %StringBeginsWith.exit88thread-split, label %bb3.i85
 
 bb3.i85:		; preds = %bb2.i84
-	%46 = load i8, i8* %pp.0.i82, align 1		; <i8> [#uses=3]
-	%47 = icmp eq i8 %46, 0		; <i1> [#uses=1]
-	br i1 %47, label %StringBeginsWith.exit88, label %bb.i80
+	%45 = load i8, ptr %pp.0.i82, align 1		; <i8> [#uses=3]
+	%46 = icmp eq i8 %45, 0		; <i1> [#uses=1]
+	br i1 %46, label %StringBeginsWith.exit88, label %bb.i80
 
 StringBeginsWith.exit88thread-split:		; preds = %bb2.i84
-	%.pr = load i8, i8* %pp.0.i82		; <i8> [#uses=1]
+	%.pr = load i8, ptr %pp.0.i82		; <i8> [#uses=1]
 	br label %StringBeginsWith.exit88
 
 StringBeginsWith.exit88:		; preds = %StringBeginsWith.exit88thread-split, %bb3.i85
-	%48 = phi i8 [ %.pr, %StringBeginsWith.exit88thread-split ], [ %46, %bb3.i85 ]		; <i8> [#uses=1]
-	%phitmp91 = icmp eq i8 %48, 0		; <i1> [#uses=1]
+	%47 = phi i8 [ %.pr, %StringBeginsWith.exit88thread-split ], [ %45, %bb3.i85 ]		; <i8> [#uses=1]
+	%phitmp91 = icmp eq i8 %47, 0		; <i1> [#uses=1]
 	br i1 %phitmp91, label %bb3.i77, label %bb2.i51
 
 bb2.i.i68:		; preds = %bb3.i77
@@ -224,19 +223,19 @@ bb2.i75:		; preds = %bb2.i.i68
 	br label %bb3.i77
 
 bb3.i77:		; preds = %bb2.i75, %StringBeginsWith.exit88
-	%sp.0.i76 = getelementptr [512 x i8], [512 x i8]* %buff, i32 0, i32 undef		; <i8*> [#uses=1]
-	%49 = load i8, i8* %sp.0.i76, align 1		; <i8> [#uses=1]
-	%50 = icmp eq i8 %49, 0		; <i1> [#uses=1]
-	br i1 %50, label %bb24, label %bb2.i.i68
+	%sp.0.i76 = getelementptr [512 x i8], ptr %buff, i32 0, i32 undef		; <ptr> [#uses=1]
+	%48 = load i8, ptr %sp.0.i76, align 1		; <i8> [#uses=1]
+	%49 = icmp eq i8 %48, 0		; <i1> [#uses=1]
+	br i1 %49, label %bb24, label %bb2.i.i68
 
 bb24:		; preds = %bb3.i77
-	%51 = call  %struct.rec* @MakeWord(i32 11, i8* %41, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind		; <%struct.rec*> [#uses=0]
-	%52 = load i8, i8* getelementptr ([150 x i8], [150 x i8]* @zz_lengths, i32 0, i32 0), align 4		; <i8> [#uses=1]
-	%53 = zext i8 %52 to i32		; <i32> [#uses=2]
-	%54 = getelementptr [524 x %struct.rec*], [524 x %struct.rec*]* @zz_free, i32 0, i32 %53		; <%struct.rec**> [#uses=2]
-	%55 = load %struct.rec*, %struct.rec** %54, align 4		; <%struct.rec*> [#uses=3]
-	%56 = icmp eq %struct.rec* %55, null		; <i1> [#uses=1]
-	br i1 %56, label %bb27, label %bb28
+	%50 = call  ptr @MakeWord(i32 11, ptr %40, ptr @no_file_pos) nounwind		; <ptr> [#uses=0]
+	%51 = load i8, ptr @zz_lengths, align 4		; <i8> [#uses=1]
+	%52 = zext i8 %51 to i32		; <i32> [#uses=2]
+	%53 = getelementptr [524 x ptr], ptr @zz_free, i32 0, i32 %52		; <ptr> [#uses=2]
+	%54 = load ptr, ptr %53, align 4		; <ptr> [#uses=3]
+	%55 = icmp eq ptr %54, null		; <i1> [#uses=1]
+	br i1 %55, label %bb27, label %bb28
 
 bb27:		; preds = %bb24
 	br i1 undef, label %bb.i56, label %GetMemory.exit62
@@ -245,64 +244,64 @@ bb.i56:		; preds = %bb27
 	br i1 undef, label %bb1.i58, label %bb2.i60
 
 bb1.i58:		; preds = %bb.i56
-	call  void (i32, i32, i8*, i32, %struct.FILE_POS*, ...) @Error(i32 31, i32 1, i8* getelementptr ([32 x i8], [32 x i8]* @.str1575, i32 0, i32 0), i32 1, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind
+	call  void (i32, i32, ptr, i32, ptr, ...) @Error(i32 31, i32 1, ptr @.str1575, i32 1, ptr @no_file_pos) nounwind
 	br label %bb2.i60
 
 bb2.i60:		; preds = %bb1.i58, %bb.i56
-	%.pre1.i59 = phi i8** [ undef, %bb1.i58 ], [ undef, %bb.i56 ]		; <i8**> [#uses=1]
-	store i8** undef, i8*** @top_free.4773, align 4
+	%.pre1.i59 = phi ptr [ undef, %bb1.i58 ], [ undef, %bb.i56 ]		; <ptr> [#uses=1]
+	store ptr undef, ptr @top_free.4773, align 4
 	br label %GetMemory.exit62
 
 GetMemory.exit62:		; preds = %bb2.i60, %bb27
-	%57 = phi i8** [ %.pre1.i59, %bb2.i60 ], [ undef, %bb27 ]		; <i8**> [#uses=1]
-	%58 = getelementptr i8*, i8** %57, i32 %53		; <i8**> [#uses=1]
-	store i8** %58, i8*** @next_free.4772, align 4
-	store %struct.rec* undef, %struct.rec** @zz_hold, align 4
+	%56 = phi ptr [ %.pre1.i59, %bb2.i60 ], [ undef, %bb27 ]		; <ptr> [#uses=1]
+	%57 = getelementptr ptr, ptr %56, i32 %52		; <ptr> [#uses=1]
+	store ptr %57, ptr @next_free.4772, align 4
+	store ptr undef, ptr @zz_hold, align 4
 	br label %bb29
 
 bb28:		; preds = %bb24
-	store %struct.rec* %55, %struct.rec** @zz_hold, align 4
-	%59 = load %struct.rec*, %struct.rec** null, align 4		; <%struct.rec*> [#uses=1]
-	store %struct.rec* %59, %struct.rec** %54, align 4
+	store ptr %54, ptr @zz_hold, align 4
+	%58 = load ptr, ptr null, align 4		; <ptr> [#uses=1]
+	store ptr %58, ptr %53, align 4
 	br label %bb29
 
 bb29:		; preds = %bb28, %GetMemory.exit62
-	%.pre184 = phi %struct.rec* [ %55, %bb28 ], [ undef, %GetMemory.exit62 ]		; <%struct.rec*> [#uses=3]
-	store i8 0, i8* undef
-	store %struct.rec* %.pre184, %struct.rec** @xx_link, align 4
+	%.pre184 = phi ptr [ %54, %bb28 ], [ undef, %GetMemory.exit62 ]		; <ptr> [#uses=3]
+	store i8 0, ptr undef
+	store ptr %.pre184, ptr @xx_link, align 4
 	br i1 undef, label %bb35, label %bb31
 
 bb31:		; preds = %bb29
-	store %struct.rec* %.pre184, %struct.rec** undef
+	store ptr %.pre184, ptr undef
 	br label %bb35
 
 bb35:		; preds = %bb31, %bb29
 	br i1 undef, label %bb41, label %bb37
 
 bb37:		; preds = %bb35
-	%60 = load %struct.rec*, %struct.rec** null, align 4		; <%struct.rec*> [#uses=1]
-	store %struct.rec* %60, %struct.rec** undef
-	store %struct.rec* undef, %struct.rec** null
-	store %struct.rec* %.pre184, %struct.rec** null, align 4
+	%59 = load ptr, ptr null, align 4		; <ptr> [#uses=1]
+	store ptr %59, ptr undef
+	store ptr undef, ptr null
+	store ptr %.pre184, ptr null, align 4
 	br label %bb41
 
 bb41:		; preds = %bb37, %bb35
-	%61 = call  i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind		; <i8*> [#uses=1]
-	%62 = icmp eq i8* %61, null		; <i1> [#uses=1]
-	%iftmp.554.0 = select i1 %62, i32 2, i32 1		; <i32> [#uses=1]
+	%60 = call  ptr @fgets(ptr %buff14, i32 512, ptr %11) nounwind		; <ptr> [#uses=1]
+	%61 = icmp eq ptr %60, null		; <i1> [#uses=1]
+	%iftmp.554.0 = select i1 %61, i32 2, i32 1		; <i32> [#uses=1]
 	br label %bb100.outer
 
 bb.i47:		; preds = %bb3.i52
-	%63 = icmp eq i8 %64, %65		; <i1> [#uses=1]
-	br i1 %63, label %bb2.i51, label %bb2.i41
+	%62 = icmp eq i8 %63, %64		; <i1> [#uses=1]
+	br i1 %62, label %bb2.i51, label %bb2.i41
 
 bb2.i51:		; preds = %bb.i47, %bb2.i.i68, %StringBeginsWith.exit88, %bb.i80
-	%pp.0.i49 = getelementptr [17 x i8], [17 x i8]* @.str1872951, i32 0, i32 0		; <i8*> [#uses=1]
-	%64 = load i8, i8* null, align 1		; <i8> [#uses=1]
+	%pp.0.i49 = getelementptr [17 x i8], ptr @.str1872951, i32 0, i32 0		; <ptr> [#uses=1]
+	%63 = load i8, ptr null, align 1		; <i8> [#uses=1]
 	br i1 false, label %StringBeginsWith.exit55thread-split, label %bb3.i52
 
 bb3.i52:		; preds = %bb2.i51
-	%65 = load i8, i8* %pp.0.i49, align 1		; <i8> [#uses=1]
+	%64 = load i8, ptr %pp.0.i49, align 1		; <i8> [#uses=1]
 	br i1 false, label %StringBeginsWith.exit55, label %bb.i47
 
 StringBeginsWith.exit55thread-split:		; preds = %bb2.i51
@@ -318,35 +317,35 @@ bb2.i41:		; preds = %bb2.i41, %bb49, %StringBeginsWith.exit55, %bb.i47
 	br i1 false, label %bb2.i41, label %bb2.i.i15
 
 bb2.i.i15:		; preds = %bb2.i41
-	%pp.0.i.i13 = getelementptr [6 x i8], [6 x i8]* @.str742838, i32 0, i32 0		; <i8*> [#uses=1]
+	%pp.0.i.i13 = getelementptr [6 x i8], ptr @.str742838, i32 0, i32 0		; <ptr> [#uses=1]
 	br i1 false, label %StringBeginsWith.exitthread-split.i18, label %bb3.i.i16
 
 bb3.i.i16:		; preds = %bb2.i.i15
-	%66 = load i8, i8* %pp.0.i.i13, align 1		; <i8> [#uses=1]
+	%65 = load i8, ptr %pp.0.i.i13, align 1		; <i8> [#uses=1]
 	br label %StringBeginsWith.exit.i20
 
 StringBeginsWith.exitthread-split.i18:		; preds = %bb2.i.i15
 	br label %StringBeginsWith.exit.i20
 
 StringBeginsWith.exit.i20:		; preds = %StringBeginsWith.exitthread-split.i18, %bb3.i.i16
-	%67 = phi i8 [ undef, %StringBeginsWith.exitthread-split.i18 ], [ %66, %bb3.i.i16 ]		; <i8> [#uses=1]
-	%phitmp.i19 = icmp eq i8 %67, 0		; <i1> [#uses=1]
+	%66 = phi i8 [ undef, %StringBeginsWith.exitthread-split.i18 ], [ %65, %bb3.i.i16 ]		; <i8> [#uses=1]
+	%phitmp.i19 = icmp eq i8 %66, 0		; <i1> [#uses=1]
 	br i1 %phitmp.i19, label %bb58, label %bb2.i6.i26
 
 bb2.i6.i26:		; preds = %bb2.i6.i26, %StringBeginsWith.exit.i20
 	%indvar.i3.i23 = phi i32 [ %indvar.next.i1.i21, %bb2.i6.i26 ], [ 0, %StringBeginsWith.exit.i20 ]		; <i32> [#uses=3]
-	%sp.0.i5.i25 = getelementptr [512 x i8], [512 x i8]* %buff, i32 0, i32 %indvar.i3.i23		; <i8*> [#uses=0]
-	%pp.0.i4.i24 = getelementptr [10 x i8], [10 x i8]* @.str752839, i32 0, i32 %indvar.i3.i23		; <i8*> [#uses=1]
-	%68 = load i8, i8* %pp.0.i4.i24, align 1		; <i8> [#uses=0]
+	%sp.0.i5.i25 = getelementptr [512 x i8], ptr %buff, i32 0, i32 %indvar.i3.i23		; <ptr> [#uses=0]
+	%pp.0.i4.i24 = getelementptr [10 x i8], ptr @.str752839, i32 0, i32 %indvar.i3.i23		; <ptr> [#uses=1]
+	%67 = load i8, ptr %pp.0.i4.i24, align 1		; <i8> [#uses=0]
 	%indvar.next.i1.i21 = add i32 %indvar.i3.i23, 1		; <i32> [#uses=1]
 	br i1 undef, label %bb2.i6.i26, label %bb55
 
 bb55:		; preds = %bb2.i6.i26
-	%69 = call  i32 @"\01_fputs"(i8* %buff14, %struct.FILE* undef) nounwind		; <i32> [#uses=0]
+	%68 = call  i32 @"\01_fputs"(ptr %buff14, ptr undef) nounwind		; <i32> [#uses=0]
 	unreachable
 
 bb58:		; preds = %StringBeginsWith.exit.i20
-	%70 = call  i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind		; <i8*> [#uses=0]
+	%69 = call  ptr @fgets(ptr %buff14, i32 512, ptr %11) nounwind		; <ptr> [#uses=0]
 	%iftmp.560.0 = select i1 undef, i32 2, i32 0		; <i32> [#uses=1]
 	br label %bb100.outer
 
@@ -367,79 +366,77 @@ StringBeginsWith.exit:		; preds = %StringBeginsWith.exitthread-split, %bb3.i
 	br i1 %phitmp93, label %bb66, label %bb2.i.i
 
 bb66:		; preds = %StringBeginsWith.exit
-	%71 = call  %struct.rec* @MakeWord(i32 11, i8* undef, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind		; <%struct.rec*> [#uses=4]
-	%72 = load i8, i8* getelementptr ([150 x i8], [150 x i8]* @zz_lengths, i32 0, i32 0), align 4		; <i8> [#uses=1]
-	%73 = zext i8 %72 to i32		; <i32> [#uses=2]
-	%74 = getelementptr [524 x %struct.rec*], [524 x %struct.rec*]* @zz_free, i32 0, i32 %73		; <%struct.rec**> [#uses=2]
-	%75 = load %struct.rec*, %struct.rec** %74, align 4		; <%struct.rec*> [#uses=3]
-	%76 = icmp eq %struct.rec* %75, null		; <i1> [#uses=1]
-	br i1 %76, label %bb69, label %bb70
+	%70 = call  ptr @MakeWord(i32 11, ptr undef, ptr @no_file_pos) nounwind		; <ptr> [#uses=4]
+	%71 = load i8, ptr @zz_lengths, align 4		; <i8> [#uses=1]
+	%72 = zext i8 %71 to i32		; <i32> [#uses=2]
+	%73 = getelementptr [524 x ptr], ptr @zz_free, i32 0, i32 %72		; <ptr> [#uses=2]
+	%74 = load ptr, ptr %73, align 4		; <ptr> [#uses=3]
+	%75 = icmp eq ptr %74, null		; <i1> [#uses=1]
+	br i1 %75, label %bb69, label %bb70
 
 bb69:		; preds = %bb66
 	br i1 undef, label %bb.i2, label %GetMemory.exit
 
 bb.i2:		; preds = %bb69
-	%77 = call  noalias i8* @calloc(i32 1020, i32 4) nounwind		; <i8*> [#uses=1]
-	%78 = bitcast i8* %77 to i8**		; <i8**> [#uses=3]
-	store i8** %78, i8*** @next_free.4772, align 4
+	%76 = call  noalias ptr @calloc(i32 1020, i32 4) nounwind		; <ptr> [#uses=1]
+	store ptr %76, ptr @next_free.4772, align 4
 	br i1 undef, label %bb1.i3, label %bb2.i4
 
 bb1.i3:		; preds = %bb.i2
-	call  void (i32, i32, i8*, i32, %struct.FILE_POS*, ...) @Error(i32 31, i32 1, i8* getelementptr ([32 x i8], [32 x i8]* @.str1575, i32 0, i32 0), i32 1, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind
+	call  void (i32, i32, ptr, i32, ptr, ...) @Error(i32 31, i32 1, ptr @.str1575, i32 1, ptr @no_file_pos) nounwind
 	br label %bb2.i4
 
 bb2.i4:		; preds = %bb1.i3, %bb.i2
-	%.pre1.i = phi i8** [ undef, %bb1.i3 ], [ %78, %bb.i2 ]		; <i8**> [#uses=1]
-	%79 = phi i8** [ undef, %bb1.i3 ], [ %78, %bb.i2 ]		; <i8**> [#uses=1]
-	%80 = getelementptr i8*, i8** %79, i32 1020		; <i8**> [#uses=1]
-	store i8** %80, i8*** @top_free.4773, align 4
+	%.pre1.i = phi ptr [ undef, %bb1.i3 ], [ %76, %bb.i2 ]		; <ptr> [#uses=1]
+	%77 = phi ptr [ undef, %bb1.i3 ], [ %76, %bb.i2 ]		; <ptr> [#uses=1]
+	%78 = getelementptr ptr, ptr %77, i32 1020		; <ptr> [#uses=1]
+	store ptr %78, ptr @top_free.4773, align 4
 	br label %GetMemory.exit
 
 GetMemory.exit:		; preds = %bb2.i4, %bb69
-	%81 = phi i8** [ %.pre1.i, %bb2.i4 ], [ undef, %bb69 ]		; <i8**> [#uses=2]
-	%82 = bitcast i8** %81 to %struct.rec*		; <%struct.rec*> [#uses=3]
-	%83 = getelementptr i8*, i8** %81, i32 %73		; <i8**> [#uses=1]
-	store i8** %83, i8*** @next_free.4772, align 4
-	store %struct.rec* %82, %struct.rec** @zz_hold, align 4
+	%79 = phi ptr [ %.pre1.i, %bb2.i4 ], [ undef, %bb69 ]		; <ptr> [#uses=2]
+	%80 = getelementptr ptr, ptr %79, i32 %72		; <ptr> [#uses=1]
+	store ptr %80, ptr @next_free.4772, align 4
+	store ptr %79, ptr @zz_hold, align 4
 	br label %bb71
 
 bb70:		; preds = %bb66
-	%84 = load %struct.rec*, %struct.rec** null, align 4		; <%struct.rec*> [#uses=1]
-	store %struct.rec* %84, %struct.rec** %74, align 4
+	%81 = load ptr, ptr null, align 4		; <ptr> [#uses=1]
+	store ptr %81, ptr %73, align 4
 	br label %bb71
 
 bb71:		; preds = %bb70, %GetMemory.exit
-	%.pre185 = phi %struct.rec* [ %75, %bb70 ], [ %82, %GetMemory.exit ]		; <%struct.rec*> [#uses=8]
-	%85 = phi %struct.rec* [ %75, %bb70 ], [ %82, %GetMemory.exit ]		; <%struct.rec*> [#uses=1]
-	%86 = getelementptr %struct.rec, %struct.rec* %85, i32 0, i32 0, i32 1, i32 0, i32 0		; <i8*> [#uses=0]
-	%87 = getelementptr %struct.rec, %struct.rec* %.pre185, i32 0, i32 0, i32 0, i32 1, i32 1		; <%struct.rec**> [#uses=0]
-	%88 = getelementptr %struct.rec, %struct.rec* %.pre185, i32 0, i32 0, i32 0, i32 1, i32 0		; <%struct.rec**> [#uses=1]
-	store %struct.rec* %.pre185, %struct.rec** @xx_link, align 4
-	store %struct.rec* %.pre185, %struct.rec** @zz_res, align 4
-	%89 = load %struct.rec*, %struct.rec** @needs, align 4		; <%struct.rec*> [#uses=2]
-	store %struct.rec* %89, %struct.rec** @zz_hold, align 4
+	%.pre185 = phi ptr [ %74, %bb70 ], [ %79, %GetMemory.exit ]		; <ptr> [#uses=8]
+	%82 = phi ptr [ %74, %bb70 ], [ %79, %GetMemory.exit ]		; <ptr> [#uses=1]
+	%83 = getelementptr %struct.rec, ptr %82, i32 0, i32 0, i32 1, i32 0, i32 0		; <ptr> [#uses=0]
+	%84 = getelementptr %struct.rec, ptr %.pre185, i32 0, i32 0, i32 0, i32 1, i32 1		; <ptr> [#uses=0]
+	%85 = getelementptr %struct.rec, ptr %.pre185, i32 0, i32 0, i32 0, i32 1, i32 0		; <ptr> [#uses=1]
+	store ptr %.pre185, ptr @xx_link, align 4
+	store ptr %.pre185, ptr @zz_res, align 4
+	%86 = load ptr, ptr @needs, align 4		; <ptr> [#uses=2]
+	store ptr %86, ptr @zz_hold, align 4
 	br i1 false, label %bb77, label %bb73
 
 bb73:		; preds = %bb71
-	%90 = getelementptr %struct.rec, %struct.rec* %89, i32 0, i32 0, i32 0, i32 0, i32 0		; <%struct.rec**> [#uses=1]
-	store %struct.rec* null, %struct.rec** @zz_tmp, align 4
-	store %struct.rec* %.pre185, %struct.rec** %90
-	store %struct.rec* %.pre185, %struct.rec** undef, align 4
+	%87 = getelementptr %struct.rec, ptr %86, i32 0, i32 0, i32 0, i32 0, i32 0		; <ptr> [#uses=1]
+	store ptr null, ptr @zz_tmp, align 4
+	store ptr %.pre185, ptr %87
+	store ptr %.pre185, ptr undef, align 4
 	br label %bb77
 
 bb77:		; preds = %bb73, %bb71
-	store %struct.rec* %.pre185, %struct.rec** @zz_res, align 4
-	store %struct.rec* %71, %struct.rec** @zz_hold, align 4
+	store ptr %.pre185, ptr @zz_res, align 4
+	store ptr %70, ptr @zz_hold, align 4
 	br i1 undef, label %bb83, label %bb79
 
 bb79:		; preds = %bb77
-	%91 = getelementptr %struct.rec, %struct.rec* %71, i32 0, i32 0, i32 0, i32 1, i32 0		; <%struct.rec**> [#uses=1]
-	store %struct.rec* null, %struct.rec** @zz_tmp, align 4
-	%92 = load %struct.rec*, %struct.rec** %88, align 4		; <%struct.rec*> [#uses=1]
-	store %struct.rec* %92, %struct.rec** %91
-	%93 = getelementptr %struct.rec, %struct.rec* undef, i32 0, i32 0, i32 0, i32 1, i32 1		; <%struct.rec**> [#uses=1]
-	store %struct.rec* %71, %struct.rec** %93, align 4
-	store %struct.rec* %.pre185, %struct.rec** undef, align 4
+	%88 = getelementptr %struct.rec, ptr %70, i32 0, i32 0, i32 0, i32 1, i32 0		; <ptr> [#uses=1]
+	store ptr null, ptr @zz_tmp, align 4
+	%89 = load ptr, ptr %85, align 4		; <ptr> [#uses=1]
+	store ptr %89, ptr %88
+	%90 = getelementptr %struct.rec, ptr undef, i32 0, i32 0, i32 0, i32 1, i32 1		; <ptr> [#uses=1]
+	store ptr %70, ptr %90, align 4
+	store ptr %.pre185, ptr undef, align 4
 	br label %bb83
 
 bb83:		; preds = %bb79, %bb77
@@ -467,22 +464,22 @@ bb2.i6.i:		; preds = %bb.i2.i, %StringBeginsWith.exit.i, %bb.i.i
 	br i1 undef, label %strip_out.exitthread-split, label %bb3.i7.i
 
 bb3.i7.i:		; preds = %bb2.i6.i
-	%94 = load i8, i8* undef, align 1		; <i8> [#uses=1]
+	%91 = load i8, ptr undef, align 1		; <i8> [#uses=1]
 	br i1 undef, label %strip_out.exit, label %bb.i2.i
 
 strip_out.exitthread-split:		; preds = %bb2.i6.i
-	%.pr100 = load i8, i8* undef		; <i8> [#uses=1]
+	%.pr100 = load i8, ptr undef		; <i8> [#uses=1]
 	br label %strip_out.exit
 
 strip_out.exit:		; preds = %strip_out.exitthread-split, %bb3.i7.i
-	%95 = phi i8 [ %.pr100, %strip_out.exitthread-split ], [ %94, %bb3.i7.i ]		; <i8> [#uses=0]
+	%92 = phi i8 [ %.pr100, %strip_out.exitthread-split ], [ %91, %bb3.i7.i ]		; <i8> [#uses=0]
 	br i1 undef, label %bb94, label %bb91
 
 bb91:		; preds = %strip_out.exit, %bb.i2.i
 	unreachable
 
 bb94:		; preds = %strip_out.exit, %StringBeginsWith.exit.i
-	%96 = call  i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind		; <i8*> [#uses=0]
+	%93 = call  ptr @fgets(ptr %buff14, i32 512, ptr %11) nounwind		; <ptr> [#uses=0]
 	unreachable
 
 bb100.outer:		; preds = %bb58, %bb41, %bb100.outer.outer
@@ -493,16 +490,16 @@ bb100.outer:		; preds = %bb58, %bb41, %bb100.outer.outer
 	]
 
 bb101.split:		; preds = %bb100.outer
-	%97 = icmp eq i32 undef, 0		; <i1> [#uses=1]
-	br i1 %97, label %bb103, label %bb102
+	%94 = icmp eq i32 undef, 0		; <i1> [#uses=1]
+	br i1 %94, label %bb103, label %bb102
 
 bb102:		; preds = %bb101.split
-	%98 = call  i32 @remove(i8* getelementptr ([9 x i8], [9 x i8]* @.str19294, i32 0, i32 0)) nounwind		; <i32> [#uses=0]
+	%95 = call  i32 @remove(ptr @.str19294) nounwind		; <i32> [#uses=0]
 	unreachable
 
 bb103:		; preds = %bb101.split
-	%99 = load %struct.FILE*, %struct.FILE** @out_fp, align 4		; <%struct.FILE*> [#uses=1]
-	%100 = call  i32 (%struct.FILE*, i8*, ...) @fprintf(%struct.FILE* %99, i8* getelementptr ([26 x i8], [26 x i8]* @.str1932957, i32 0, i32 0)) nounwind		; <i32> [#uses=0]
-	store i32 0, i32* @wordcount, align 4
+	%96 = load ptr, ptr @out_fp, align 4		; <ptr> [#uses=1]
+	%97 = call  i32 (ptr, ptr, ...) @fprintf(ptr %96, ptr @.str1932957) nounwind		; <i32> [#uses=0]
+	store i32 0, ptr @wordcount, align 4
 	ret void
 }

diff  --git a/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll b/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll
index 2c6c05d5e21ea..59c93845e9d6a 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll
@@ -2,17 +2,17 @@
 ; rdar://7117307
 
 	%struct.Hosp = type { i32, i32, i32, %struct.List, %struct.List, %struct.List, %struct.List }
-	%struct.List = type { %struct.List*, %struct.Patient*, %struct.List* }
-	%struct.Patient = type { i32, i32, i32, %struct.Village* }
+	%struct.List = type { ptr, ptr, ptr }
+	%struct.Patient = type { i32, i32, i32, ptr }
 	%struct.Results = type { float, float, float }
-	%struct.Village = type { [4 x %struct.Village*], %struct.Village*, %struct.List, %struct.Hosp, i32, i32 }
+	%struct.Village = type { [4 x ptr], ptr, %struct.List, %struct.Hosp, i32, i32 }
 
-define void @get_results(%struct.Results* noalias nocapture sret(%struct.Results) %agg.result, %struct.Village* %village) nounwind {
+define void @get_results(ptr noalias nocapture sret(%struct.Results) %agg.result, ptr %village) nounwind {
 entry:
 	br i1 undef, label %bb, label %bb6.preheader
 
 bb6.preheader:		; preds = %entry
-        call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 undef, i8* align 4 undef, i32 12, i1 false)
+        call void @llvm.memcpy.p0.p0.i32(ptr align 4 undef, ptr align 4 undef, i32 12, i1 false)
 	br i1 undef, label %bb15, label %bb13
 
 bb:		; preds = %entry
@@ -26,8 +26,8 @@ bb13:		; preds = %bb13, %bb6.preheader
 bb15:		; preds = %bb13, %bb6.preheader
 	%r1.0.0.lcssa = phi float [ 0.000000e+00, %bb6.preheader ], [ %1, %bb13 ]		; <float> [#uses=1]
 	%r1.1.0.lcssa = phi float [ undef, %bb6.preheader ], [ %0, %bb13 ]		; <float> [#uses=0]
-	store float %r1.0.0.lcssa, float* undef, align 4
+	store float %r1.0.0.lcssa, ptr undef, align 4
 	ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind

diff  --git a/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug2.ll b/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug2.ll
index 9d4fc313cf9c5..3b7bde4f960b7 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug2.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug2.ll
@@ -2,11 +2,11 @@
 ; rdar://7117307
 
 	%struct.Hosp = type { i32, i32, i32, %struct.List, %struct.List, %struct.List, %struct.List }
-	%struct.List = type { %struct.List*, %struct.Patient*, %struct.List* }
-	%struct.Patient = type { i32, i32, i32, %struct.Village* }
-	%struct.Village = type { [4 x %struct.Village*], %struct.Village*, %struct.List, %struct.Hosp, i32, i32 }
+	%struct.List = type { ptr, ptr, ptr }
+	%struct.Patient = type { i32, i32, i32, ptr }
+	%struct.Village = type { [4 x ptr], ptr, %struct.List, %struct.Hosp, i32, i32 }
 
-define %struct.List* @sim(%struct.Village* %village) nounwind {
+define ptr @sim(ptr %village) nounwind {
 entry:
 	br i1 undef, label %bb14, label %bb3.preheader
 
@@ -19,11 +19,11 @@ bb5:		; preds = %bb5, %bb3.preheader
 bb11:		; preds = %bb5
 	%0 = fmul float undef, 0x41E0000000000000		; <float> [#uses=1]
 	%1 = fptosi float %0 to i32		; <i32> [#uses=1]
-	store i32 %1, i32* undef, align 4
+	store i32 %1, ptr undef, align 4
 	br i1 undef, label %generate_patient.exit, label %generate_patient.exit.thread
 
 generate_patient.exit.thread:		; preds = %bb11
-	ret %struct.List* null
+	ret ptr null
 
 generate_patient.exit:		; preds = %bb11
 	br i1 undef, label %bb14, label %bb12
@@ -32,11 +32,11 @@ bb12:		; preds = %generate_patient.exit
 	br i1 undef, label %bb.i, label %bb1.i
 
 bb.i:		; preds = %bb12
-	ret %struct.List* null
+	ret ptr null
 
 bb1.i:		; preds = %bb12
-	ret %struct.List* null
+	ret ptr null
 
 bb14:		; preds = %generate_patient.exit, %entry
-	ret %struct.List* undef
+	ret ptr undef
 }

diff  --git a/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll b/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll
index 66ed876f98e3e..1343057e9056f 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll
@@ -2,11 +2,11 @@
 ; rdar://7117307
 
 	%struct.Hosp = type { i32, i32, i32, %struct.List, %struct.List, %struct.List, %struct.List }
-	%struct.List = type { %struct.List*, %struct.Patient*, %struct.List* }
-	%struct.Patient = type { i32, i32, i32, %struct.Village* }
-	%struct.Village = type { [4 x %struct.Village*], %struct.Village*, %struct.List, %struct.Hosp, i32, i32 }
+	%struct.List = type { ptr, ptr, ptr }
+	%struct.Patient = type { i32, i32, i32, ptr }
+	%struct.Village = type { [4 x ptr], ptr, %struct.List, %struct.Hosp, i32, i32 }
 
-define %struct.List* @sim(%struct.Village* %village) nounwind {
+define ptr @sim(ptr %village) nounwind {
 entry:
 	br i1 undef, label %bb14, label %bb3.preheader
 
@@ -17,7 +17,7 @@ bb5:		; preds = %bb5, %bb3.preheader
 	br i1 undef, label %bb11, label %bb5
 
 bb11:		; preds = %bb5
-	%0 = load i32, i32* undef, align 4		; <i32> [#uses=1]
+	%0 = load i32, ptr undef, align 4		; <i32> [#uses=1]
 	%1 = xor i32 %0, 123459876		; <i32> [#uses=1]
 	%2 = sdiv i32 %1, 127773		; <i32> [#uses=1]
 	%3 = mul i32 %2, 2836		; <i32> [#uses=1]
@@ -29,13 +29,13 @@ bb11:		; preds = %bb5
 	%8 = fptrunc double %7 to float		; <float> [#uses=2]
 	%9 = fmul float %8, 0x41E0000000000000		; <float> [#uses=1]
 	%10 = fptosi float %9 to i32		; <i32> [#uses=1]
-	store i32 %10, i32* undef, align 4
+	store i32 %10, ptr undef, align 4
 	%11 = fpext float %8 to double		; <double> [#uses=1]
 	%12 = fcmp ogt double %11, 6.660000e-01		; <i1> [#uses=1]
 	br i1 %12, label %generate_patient.exit, label %generate_patient.exit.thread
 
 generate_patient.exit.thread:		; preds = %bb11
-	ret %struct.List* null
+	ret ptr null
 
 generate_patient.exit:		; preds = %bb11
 	br i1 undef, label %bb14, label %bb12
@@ -44,11 +44,11 @@ bb12:		; preds = %generate_patient.exit
 	br i1 undef, label %bb.i, label %bb1.i
 
 bb.i:		; preds = %bb12
-	ret %struct.List* null
+	ret ptr null
 
 bb1.i:		; preds = %bb12
-	ret %struct.List* null
+	ret ptr null
 
 bb14:		; preds = %generate_patient.exit, %entry
-	ret %struct.List* undef
+	ret ptr undef
 }

diff  --git a/llvm/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll b/llvm/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
index a451321838e7c..0a315a9822e8d 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
@@ -2,7 +2,7 @@
 ; PR4659
 ; PR4682
 
-define hidden i32 @__gcov_execlp(i8* %path, i8* %arg, ...) nounwind {
+define hidden i32 @__gcov_execlp(ptr %path, ptr %arg, ...) nounwind {
 entry:
 ; CHECK-LABEL: __gcov_execlp:
 ; CHECK: sub sp, #8
@@ -13,20 +13,20 @@ entry:
 ; CHECK-NOT: mov sp, r7
 ; CHECK: add sp, #8
 	call void @__gcov_flush() nounwind
-	call void @llvm.va_start(i8* null)
+	call void @llvm.va_start(ptr null)
 	br i1 undef, label %bb5, label %bb
 
 bb:		; preds = %bb, %entry
 	br i1 undef, label %bb5, label %bb
 
 bb5:		; preds = %bb, %entry
-	%0 = alloca i8*, i32 undef, align 4		; <i8**> [#uses=1]
-	%1 = call i32 @execvp(i8* %path, i8** %0) nounwind		; <i32> [#uses=1]
+	%0 = alloca ptr, i32 undef, align 4		; <ptr> [#uses=1]
+	%1 = call i32 @execvp(ptr %path, ptr %0) nounwind		; <i32> [#uses=1]
 	ret i32 %1
 }
 
 declare hidden void @__gcov_flush()
 
-declare i32 @execvp(i8*, i8**) nounwind
+declare i32 @execvp(ptr, ptr) nounwind
 
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind

diff  --git a/llvm/test/CodeGen/Thumb2/2009-08-07-CoalescerBug.ll b/llvm/test/CodeGen/Thumb2/2009-08-07-CoalescerBug.ll
index 93f5a0f6c41fd..767f2f0cb8892 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-07-CoalescerBug.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-07-CoalescerBug.ll
@@ -1,16 +1,16 @@
 ; RUN: llc < %s -mtriple=armv7-eabi -mattr=+vfp2
 ; PR4686
 
-	%a = type { i32 (...)** }
+	%a = type { ptr }
 	%b = type { %a }
 	%c = type { float, float, float, float }
 
-declare arm_aapcs_vfpcc float @bar(%c*)
+declare arm_aapcs_vfpcc float @bar(ptr)
 
-define arm_aapcs_vfpcc void @foo(%b* %x, %c* %y) {
+define arm_aapcs_vfpcc void @foo(ptr %x, ptr %y) {
 entry:
-	%0 = call arm_aapcs_vfpcc  float @bar(%c* %y)		; <float> [#uses=0]
+	%0 = call arm_aapcs_vfpcc  float @bar(ptr %y)		; <float> [#uses=0]
 	%1 = fadd float undef, undef		; <float> [#uses=1]
-	store float %1, float* undef, align 8
+	store float %1, ptr undef, align 8
 	ret void
 }

diff  --git a/llvm/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll b/llvm/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll
index 5480868d7a668..0f866c3593800 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll
@@ -1,44 +1,43 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -mcpu=cortex-a8
 
-	%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
+	%struct.FILE = type { ptr, i32, i32, i16, i16, %struct.__sbuf, i32, ptr, ptr, ptr, ptr, ptr, %struct.__sbuf, ptr, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
 	%struct.JHUFF_TBL = type { [17 x i8], [256 x i8], i32 }
 	%struct.JQUANT_TBL = type { [64 x i16], i32 }
 	%struct.__sFILEX = type opaque
-	%struct.__sbuf = type { i8*, i32 }
+	%struct.__sbuf = type { ptr, i32 }
 	%struct.anon = type { [8 x i32], [48 x i8] }
-	%struct.backing_store_info = type { void (%struct.jpeg_common_struct*, %struct.backing_store_info*, i8*, i32, i32)*, void (%struct.jpeg_common_struct*, %struct.backing_store_info*, i8*, i32, i32)*, void (%struct.jpeg_common_struct*, %struct.backing_store_info*)*, %struct.FILE*, [64 x i8] }
-	%struct.jpeg_color_deconverter = type { void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*, i8***, i32, i8**, i32)* }
-	%struct.jpeg_color_quantizer = type { void (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*, i8**, i8**, i32)*, void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)* }
-	%struct.jpeg_common_struct = type { %struct.jpeg_error_mgr*, %struct.jpeg_memory_mgr*, %struct.jpeg_progress_mgr*, i32, i32 }
-	%struct.jpeg_component_info = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.JQUANT_TBL*, i8* }
-	%struct.jpeg_d_coef_controller = type { void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*, i8***)*, %struct.jvirt_barray_control** }
-	%struct.jpeg_d_main_controller = type { void (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*, i8**, i32*, i32)* }
-	%struct.jpeg_d_post_controller = type { void (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*, i8***, i32*, i32, i8**, i32*, i32)* }
-	%struct.jpeg_decomp_master = type { void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, i32 }
-	%struct.jpeg_decompress_struct = type { %struct.jpeg_error_mgr*, %struct.jpeg_memory_mgr*, %struct.jpeg_progress_mgr*, i32, i32, %struct.jpeg_source_mgr*, i32, i32, i32, i32, i32, i32, i32, double, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8**, i32, i32, i32, i32, i32, [64 x i32]*, [4 x %struct.JQUANT_TBL*], [4 x %struct.JHUFF_TBL*], [4 x %struct.JHUFF_TBL*], i32, %struct.jpeg_component_info*, i32, i32, [16 x i8], [16 x i8], [16 x i8], i32, i32, i8, i16, i16, i32, i8, i32, i32, i32, i32, i32, i8*, i32, [4 x %struct.jpeg_component_info*], i32, i32, i32, [10 x i32], i32, i32, i32, i32, i32, %struct.jpeg_decomp_master*, %struct.jpeg_d_main_controller*, %struct.jpeg_d_coef_controller*, %struct.jpeg_d_post_controller*, %struct.jpeg_input_controller*, %struct.jpeg_marker_reader*, %struct.jpeg_entropy_decoder*, %struct.jpeg_inverse_dct*, %struct.jpeg_upsampler*, %struct.jpeg_color_deconverter*, %struct.jpeg_color_quantizer* }
-	%struct.jpeg_entropy_decoder = type { void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*, [64 x i16]**)* }
-	%struct.jpeg_error_mgr = type { void (%struct.jpeg_common_struct*)*, void (%struct.jpeg_common_struct*, i32)*, void (%struct.jpeg_common_struct*)*, void (%struct.jpeg_common_struct*, i8*)*, void (%struct.jpeg_common_struct*)*, i32, %struct.anon, i32, i32, i8**, i32, i8**, i32, i32 }
-	%struct.jpeg_input_controller = type { i32 (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, i32, i32 }
-	%struct.jpeg_inverse_dct = type { void (%struct.jpeg_decompress_struct*)*, [10 x void (%struct.jpeg_decompress_struct*, %struct.jpeg_component_info*, i16*, i8**, i32)*] }
-	%struct.jpeg_marker_reader = type { void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, [16 x i32 (%struct.jpeg_decompress_struct*)*], i32, i32, i32, i32 }
-	%struct.jpeg_memory_mgr = type { i8* (%struct.jpeg_common_struct*, i32, i32)*, i8* (%struct.jpeg_common_struct*, i32, i32)*, i8** (%struct.jpeg_common_struct*, i32, i32, i32)*, [64 x i16]** (%struct.jpeg_common_struct*, i32, i32, i32)*, %struct.jvirt_sarray_control* (%struct.jpeg_common_struct*, i32, i32, i32, i32, i32)*, %struct.jvirt_barray_control* (%struct.jpeg_common_struct*, i32, i32, i32, i32, i32)*, void (%struct.jpeg_common_struct*)*, i8** (%struct.jpeg_common_struct*, %struct.jvirt_sarray_control*, i32, i32, i32)*, [64 x i16]** (%struct.jpeg_common_struct*, %struct.jvirt_barray_control*, i32, i32, i32)*, void (%struct.jpeg_common_struct*, i32)*, void (%struct.jpeg_common_struct*)*, i32 }
-	%struct.jpeg_progress_mgr = type { void (%struct.jpeg_common_struct*)*, i32, i32, i32, i32 }
-	%struct.jpeg_source_mgr = type { i8*, i32, void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*, i32)*, i32 (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*)* }
-	%struct.jpeg_upsampler = type { void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*, i8***, i32*, i32, i8**, i32*, i32)*, i32 }
-	%struct.jvirt_barray_control = type { [64 x i16]**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.jvirt_barray_control*, %struct.backing_store_info }
-	%struct.jvirt_sarray_control = type { i8**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.jvirt_sarray_control*, %struct.backing_store_info }
+	%struct.backing_store_info = type { ptr, ptr, ptr, ptr, [64 x i8] }
+	%struct.jpeg_color_deconverter = type { ptr, ptr }
+	%struct.jpeg_color_quantizer = type { ptr, ptr, ptr, ptr }
+	%struct.jpeg_common_struct = type { ptr, ptr, ptr, i32, i32 }
+	%struct.jpeg_component_info = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr }
+	%struct.jpeg_d_coef_controller = type { ptr, ptr, ptr, ptr, ptr }
+	%struct.jpeg_d_main_controller = type { ptr, ptr }
+	%struct.jpeg_d_post_controller = type { ptr, ptr }
+	%struct.jpeg_decomp_master = type { ptr, ptr, i32 }
+	%struct.jpeg_decompress_struct = type { ptr, ptr, ptr, i32, i32, ptr, i32, i32, i32, i32, i32, i32, i32, double, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, i32, i32, i32, i32, i32, ptr, [4 x ptr], [4 x ptr], [4 x ptr], i32, ptr, i32, i32, [16 x i8], [16 x i8], [16 x i8], i32, i32, i8, i16, i16, i32, i8, i32, i32, i32, i32, i32, ptr, i32, [4 x ptr], i32, i32, i32, [10 x i32], i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+	%struct.jpeg_entropy_decoder = type { ptr, ptr }
+	%struct.jpeg_error_mgr = type { ptr, ptr, ptr, ptr, ptr, i32, %struct.anon, i32, i32, ptr, i32, ptr, i32, i32 }
+	%struct.jpeg_input_controller = type { ptr, ptr, ptr, ptr, i32, i32 }
+	%struct.jpeg_inverse_dct = type { ptr, [10 x ptr] }
+	%struct.jpeg_marker_reader = type { ptr, ptr, ptr, ptr, [16 x ptr], i32, i32, i32, i32 }
+	%struct.jpeg_memory_mgr = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32 }
+	%struct.jpeg_progress_mgr = type { ptr, i32, i32, i32, i32 }
+	%struct.jpeg_source_mgr = type { ptr, i32, ptr, ptr, ptr, ptr, ptr }
+	%struct.jpeg_upsampler = type { ptr, ptr, i32 }
+	%struct.jvirt_barray_control = type { ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, %struct.backing_store_info }
+	%struct.jvirt_sarray_control = type { ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, %struct.backing_store_info }
 
-define void @jpeg_idct_float(%struct.jpeg_decompress_struct* nocapture %cinfo, %struct.jpeg_component_info* nocapture %compptr, i16* nocapture %coef_block, i8** nocapture %output_buf, i32 %output_col) nounwind {
+define void @jpeg_idct_float(ptr nocapture %cinfo, ptr nocapture %compptr, ptr nocapture %coef_block, ptr nocapture %output_buf, i32 %output_col) nounwind {
 entry:
 	br label %bb
 
 bb:		; preds = %bb, %entry
-	%0 = load float, float* undef, align 4		; <float> [#uses=1]
+	%0 = load float, ptr undef, align 4		; <float> [#uses=1]
 	%1 = fmul float undef, %0		; <float> [#uses=2]
 	%tmp73 = add i32 0, 224		; <i32> [#uses=1]
-	%scevgep74 = getelementptr i8, i8* null, i32 %tmp73		; <i8*> [#uses=1]
-	%scevgep7475 = bitcast i8* %scevgep74 to float*		; <float*> [#uses=1]
-	%2 = load float, float* null, align 4		; <float> [#uses=1]
+	%scevgep74 = getelementptr i8, ptr null, i32 %tmp73		; <ptr> [#uses=1]
+	%2 = load float, ptr null, align 4		; <float> [#uses=1]
 	%3 = fmul float 0.000000e+00, %2		; <float> [#uses=2]
 	%4 = fadd float %1, %3		; <float> [#uses=1]
 	%5 = fsub float %1, %3		; <float> [#uses=2]
@@ -51,7 +50,7 @@ bb:		; preds = %bb, %entry
 	%12 = sitofp i16 undef to float		; <float> [#uses=1]
 	%13 = fmul float %12, 0.000000e+00		; <float> [#uses=2]
 	%14 = sitofp i16 undef to float		; <float> [#uses=1]
-	%15 = load float, float* %scevgep7475, align 4		; <float> [#uses=1]
+	%15 = load float, ptr %scevgep74, align 4		; <float> [#uses=1]
 	%16 = fmul float %14, %15		; <float> [#uses=2]
 	%17 = fadd float undef, undef		; <float> [#uses=2]
 	%18 = fadd float %13, %16		; <float> [#uses=2]
@@ -67,14 +66,14 @@ bb:		; preds = %bb, %entry
 	%28 = fsub float %22, %27		; <float> [#uses=2]
 	%29 = fadd float %25, %28		; <float> [#uses=1]
 	%30 = fadd float undef, %20		; <float> [#uses=1]
-	store float %30, float* undef, align 4
+	store float %30, ptr undef, align 4
 	%31 = fadd float %10, %27		; <float> [#uses=1]
-	store float %31, float* undef, align 4
+	store float %31, ptr undef, align 4
 	%32 = fsub float %10, %27		; <float> [#uses=1]
-	store float %32, float* undef, align 4
+	store float %32, ptr undef, align 4
 	%33 = fadd float %11, %28		; <float> [#uses=1]
-	store float %33, float* undef, align 4
+	store float %33, ptr undef, align 4
 	%34 = fsub float %9, %29		; <float> [#uses=1]
-	store float %34, float* undef, align 4
+	store float %34, ptr undef, align 4
 	br label %bb
 }

diff --git a/llvm/test/CodeGen/Thumb2/2009-08-08-ScavengerAssert.ll b/llvm/test/CodeGen/Thumb2/2009-08-08-ScavengerAssert.ll
index e3c23ac025f4c..f6593721bed99 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-08-ScavengerAssert.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-08-ScavengerAssert.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=armv7-eabi -mattr=+vfp2
 ; PR4686
 
-@g_d = external global double		; <double*> [#uses=1]
+@g_d = external global double		; <ptr> [#uses=1]
 
 define void @foo(float %yIncr) {
 entry:
@@ -10,7 +10,7 @@ entry:
 bb:		; preds = %entry
 	%0 = call arm_aapcs_vfpcc  float @bar()		; <float> [#uses=1]
 	%1 = fpext float %0 to double		; <double> [#uses=1]
-	store double %1, double* @g_d, align 8
+	store double %1, ptr @g_d, align 8
 	br label %bb4
 
 bb4:		; preds = %bb, %entry

diff --git a/llvm/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll b/llvm/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll
index 2bbed1beae157..4871a00bc874f 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll
@@ -2,12 +2,12 @@
 
 define float @t1(i32 %v0) nounwind {
 entry:
-	store i32 undef, i32* undef, align 4
-	%0 = load [4 x i8]*, [4 x i8]** undef, align 4		; <[4 x i8]*> [#uses=1]
-	%1 = load i8, i8* undef, align 1		; <i8> [#uses=1]
+	store i32 undef, ptr undef, align 4
+	%0 = load ptr, ptr undef, align 4		; <ptr> [#uses=1]
+	%1 = load i8, ptr undef, align 1		; <i8> [#uses=1]
 	%2 = zext i8 %1 to i32		; <i32> [#uses=1]
-	%3 = getelementptr [4 x i8], [4 x i8]* %0, i32 %v0, i32 0		; <i8*> [#uses=1]
-	%4 = load i8, i8* %3, align 1		; <i8> [#uses=1]
+	%3 = getelementptr [4 x i8], ptr %0, i32 %v0, i32 0		; <ptr> [#uses=1]
+	%4 = load i8, ptr %3, align 1		; <i8> [#uses=1]
 	%5 = zext i8 %4 to i32		; <i32> [#uses=1]
 	%6 = sub i32 %5, %2		; <i32> [#uses=1]
 	%7 = sitofp i32 %6 to float		; <float> [#uses=1]

diff --git a/llvm/test/CodeGen/Thumb2/2009-08-21-PostRAKill4.ll b/llvm/test/CodeGen/Thumb2/2009-08-21-PostRAKill4.ll
index ff6375d52f64e..4c54e308ea085 100644
--- a/llvm/test/CodeGen/Thumb2/2009-08-21-PostRAKill4.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-08-21-PostRAKill4.ll
@@ -4,23 +4,23 @@
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
 target triple = "armv7-apple-darwin9"
 
-@.str = external constant [36 x i8], align 1      ; <[36 x i8]*> [#uses=0]
-@.str1 = external constant [31 x i8], align 1     ; <[31 x i8]*> [#uses=1]
-@.str2 = external constant [4 x i8], align 1      ; <[4 x i8]*> [#uses=1]
+@.str = external constant [36 x i8], align 1      ; <ptr> [#uses=0]
+@.str1 = external constant [31 x i8], align 1     ; <ptr> [#uses=1]
+@.str2 = external constant [4 x i8], align 1      ; <ptr> [#uses=1]
 
 declare i32 @getUnknown(i32, ...) nounwind
 
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
 
-declare void @llvm.va_end(i8*) nounwind
+declare void @llvm.va_end(ptr) nounwind
 
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
 
 define i32 @main() nounwind {
 entry:
-  %0 = tail call  i32 (i8*, ...) @printf(i8* getelementptr ([31 x i8], [31 x i8]* @.str1, i32 0, i32 0), i32 1, i32 1, i32 1, i32 1, i32 1, i32 1) nounwind ; <i32> [#uses=0]
-  %1 = tail call  i32 (i8*, ...) @printf(i8* getelementptr ([31 x i8], [31 x i8]* @.str1, i32 0, i32 0), i32 -128, i32 116, i32 116, i32 -3852, i32 -31232, i32 -1708916736) nounwind ; <i32> [#uses=0]
+  %0 = tail call  i32 (ptr, ...) @printf(ptr @.str1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1) nounwind ; <i32> [#uses=0]
+  %1 = tail call  i32 (ptr, ...) @printf(ptr @.str1, i32 -128, i32 116, i32 116, i32 -3852, i32 -31232, i32 -1708916736) nounwind ; <i32> [#uses=0]
   %2 = tail call  i32 (i32, ...) @getUnknown(i32 undef, i32 116, i32 116, i32 -3852, i32 -31232, i32 30556, i32 -1708916736) nounwind ; <i32> [#uses=1]
-  %3 = tail call  i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @.str2, i32 0, i32 0), i32 %2) nounwind ; <i32> [#uses=0]
+  %3 = tail call  i32 (ptr, ...) @printf(ptr @.str2, i32 %2) nounwind ; <i32> [#uses=0]
   ret i32 0
 }

diff --git a/llvm/test/CodeGen/Thumb2/2009-09-01-PostRAProlog.ll b/llvm/test/CodeGen/Thumb2/2009-09-01-PostRAProlog.ll
index ee1f03e2c428d..b03c120d399bc 100644
--- a/llvm/test/CodeGen/Thumb2/2009-09-01-PostRAProlog.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-09-01-PostRAProlog.ll
@@ -3,59 +3,59 @@
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
 target triple = "thumbv7-apple-darwin9"
 
-@history = internal global [2 x [56 x i32]] [[56 x i32] [i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0], [56 x i32] [i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0]] ; <[2 x [56 x i32]]*> [#uses=3]
-@nodes = internal global i64 0                    ; <i64*> [#uses=4]
-@.str = private constant [9 x i8] c"##-<=>+#\00", align 1 ; <[9 x i8]*> [#uses=2]
-@.str1 = private constant [6 x i8] c"%c%d\0A\00", align 1 ; <[6 x i8]*> [#uses=1]
-@.str2 = private constant [16 x i8] c"Fhourstones 2.0\00", align 1 ; <[16 x i8]*> [#uses=1]
-@.str3 = private constant [54 x i8] c"Using %d transposition table entries with %d probes.\0A\00", align 1 ; <[54 x i8]*> [#uses=1]
-@.str4 = private constant [31 x i8] c"Solving %d-ply position after \00", align 1 ; <[31 x i8]*> [#uses=1]
-@.str5 = private constant [7 x i8] c" . . .\00", align 1 ; <[7 x i8]*> [#uses=1]
-@.str6 = private constant [28 x i8] c"score = %d (%c)  work = %d\0A\00", align 1 ; <[28 x i8]*> [#uses=1]
-@.str7 = private constant [36 x i8] c"%lu pos / %lu msec = %.1f Kpos/sec\0A\00", align 1 ; <[36 x i8]*> [#uses=1]
-@plycnt = internal global i32 0                   ; <i32*> [#uses=21]
-@dias = internal global [19 x i32] zeroinitializer ; <[19 x i32]*> [#uses=43]
-@columns = internal global [128 x i32] zeroinitializer ; <[128 x i32]*> [#uses=18]
-@height = internal global [128 x i32] zeroinitializer ; <[128 x i32]*> [#uses=21]
-@rows = internal global [8 x i32] zeroinitializer ; <[8 x i32]*> [#uses=20]
-@colthr = internal global [128 x i32] zeroinitializer ; <[128 x i32]*> [#uses=5]
-@moves = internal global [44 x i32] zeroinitializer ; <[44 x i32]*> [#uses=9]
-@.str8 = private constant [3 x i8] c"%d\00", align 1 ; <[3 x i8]*> [#uses=1]
-@he = internal global i8* null                    ; <i8**> [#uses=9]
-@hits = internal global i64 0                     ; <i64*> [#uses=8]
-@posed = internal global i64 0                    ; <i64*> [#uses=7]
-@ht = internal global i32* null                   ; <i32**> [#uses=5]
-@.str16 = private constant [19 x i8] c"store rate = %.3f\0A\00", align 1 ; <[19 x i8]*> [#uses=1]
-@.str117 = private constant [45 x i8] c"- %5.3f  < %5.3f  = %5.3f  > %5.3f  + %5.3f\0A\00", align 1 ; <[45 x i8]*> [#uses=1]
-@.str218 = private constant [6 x i8] c"%7d%c\00", align 1 ; <[6 x i8]*> [#uses=1]
-@.str319 = private constant [30 x i8] c"Failed to allocate %u bytes.\0A\00", align 1 ; <[30 x i8]*> [#uses=1]
+@history = internal global [2 x [56 x i32]] [[56 x i32] [i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0], [56 x i32] [i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0]] ; <ptr> [#uses=3]
+@nodes = internal global i64 0                    ; <ptr> [#uses=4]
+@.str = private constant [9 x i8] c"##-<=>+#\00", align 1 ; <ptr> [#uses=2]
+@.str1 = private constant [6 x i8] c"%c%d\0A\00", align 1 ; <ptr> [#uses=1]
+@.str2 = private constant [16 x i8] c"Fhourstones 2.0\00", align 1 ; <ptr> [#uses=1]
+@.str3 = private constant [54 x i8] c"Using %d transposition table entries with %d probes.\0A\00", align 1 ; <ptr> [#uses=1]
+@.str4 = private constant [31 x i8] c"Solving %d-ply position after \00", align 1 ; <ptr> [#uses=1]
+@.str5 = private constant [7 x i8] c" . . .\00", align 1 ; <ptr> [#uses=1]
+@.str6 = private constant [28 x i8] c"score = %d (%c)  work = %d\0A\00", align 1 ; <ptr> [#uses=1]
+@.str7 = private constant [36 x i8] c"%lu pos / %lu msec = %.1f Kpos/sec\0A\00", align 1 ; <ptr> [#uses=1]
+@plycnt = internal global i32 0                   ; <ptr> [#uses=21]
+@dias = internal global [19 x i32] zeroinitializer ; <ptr> [#uses=43]
+@columns = internal global [128 x i32] zeroinitializer ; <ptr> [#uses=18]
+@height = internal global [128 x i32] zeroinitializer ; <ptr> [#uses=21]
+@rows = internal global [8 x i32] zeroinitializer ; <ptr> [#uses=20]
+@colthr = internal global [128 x i32] zeroinitializer ; <ptr> [#uses=5]
+@moves = internal global [44 x i32] zeroinitializer ; <ptr> [#uses=9]
+@.str8 = private constant [3 x i8] c"%d\00", align 1 ; <ptr> [#uses=1]
+@he = internal global ptr null                    ; <ptr> [#uses=9]
+@hits = internal global i64 0                     ; <ptr> [#uses=8]
+@posed = internal global i64 0                    ; <ptr> [#uses=7]
+@ht = internal global ptr null                   ; <ptr> [#uses=5]
+@.str16 = private constant [19 x i8] c"store rate = %.3f\0A\00", align 1 ; <ptr> [#uses=1]
+@.str117 = private constant [45 x i8] c"- %5.3f  < %5.3f  = %5.3f  > %5.3f  + %5.3f\0A\00", align 1 ; <ptr> [#uses=1]
+@.str218 = private constant [6 x i8] c"%7d%c\00", align 1 ; <ptr> [#uses=1]
+@.str319 = private constant [30 x i8] c"Failed to allocate %u bytes.\0A\00", align 1 ; <ptr> [#uses=1]
 
-declare i32 @puts(i8* nocapture) nounwind
+declare i32 @puts(ptr nocapture) nounwind
 
 declare i32 @getchar() nounwind
 
 define internal i32 @transpose() nounwind readonly {
 ; CHECK: push
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([128 x i32], [128 x i32]* @columns, i32 0, i32 1), align 4 ; <i32> [#uses=1]
+  %0 = load i32, ptr getelementptr inbounds ([128 x i32], ptr @columns, i32 0, i32 1), align 4 ; <i32> [#uses=1]
   %1 = shl i32 %0, 7                              ; <i32> [#uses=1]
-  %2 = load i32, i32* getelementptr inbounds ([128 x i32], [128 x i32]* @columns, i32 0, i32 2), align 4 ; <i32> [#uses=1]
+  %2 = load i32, ptr getelementptr inbounds ([128 x i32], ptr @columns, i32 0, i32 2), align 4 ; <i32> [#uses=1]
   %3 = or i32 %1, %2                              ; <i32> [#uses=1]
   %4 = shl i32 %3, 7                              ; <i32> [#uses=1]
-  %5 = load i32, i32* getelementptr inbounds ([128 x i32], [128 x i32]* @columns, i32 0, i32 3), align 4 ; <i32> [#uses=1]
+  %5 = load i32, ptr getelementptr inbounds ([128 x i32], ptr @columns, i32 0, i32 3), align 4 ; <i32> [#uses=1]
   %6 = or i32 %4, %5                              ; <i32> [#uses=3]
-  %7 = load i32, i32* getelementptr inbounds ([128 x i32], [128 x i32]* @columns, i32 0, i32 7), align 4 ; <i32> [#uses=1]
+  %7 = load i32, ptr getelementptr inbounds ([128 x i32], ptr @columns, i32 0, i32 7), align 4 ; <i32> [#uses=1]
   %8 = shl i32 %7, 7                              ; <i32> [#uses=1]
-  %9 = load i32, i32* getelementptr inbounds ([128 x i32], [128 x i32]* @columns, i32 0, i32 6), align 4 ; <i32> [#uses=1]
+  %9 = load i32, ptr getelementptr inbounds ([128 x i32], ptr @columns, i32 0, i32 6), align 4 ; <i32> [#uses=1]
   %10 = or i32 %8, %9                             ; <i32> [#uses=1]
   %11 = shl i32 %10, 7                            ; <i32> [#uses=1]
-  %12 = load i32, i32* getelementptr inbounds ([128 x i32], [128 x i32]* @columns, i32 0, i32 5), align 4 ; <i32> [#uses=1]
+  %12 = load i32, ptr getelementptr inbounds ([128 x i32], ptr @columns, i32 0, i32 5), align 4 ; <i32> [#uses=1]
   %13 = or i32 %11, %12                           ; <i32> [#uses=3]
   %14 = icmp ugt i32 %6, %13                      ; <i1> [#uses=2]
   %.pn2.in.i = select i1 %14, i32 %6, i32 %13     ; <i32> [#uses=1]
   %.pn1.in.i = select i1 %14, i32 %13, i32 %6     ; <i32> [#uses=1]
   %.pn2.i = shl i32 %.pn2.in.i, 7                 ; <i32> [#uses=1]
-  %.pn3.i = load i32, i32* getelementptr inbounds ([128 x i32], [128 x i32]* @columns, i32 0, i32 4) ; <i32> [#uses=1]
+  %.pn3.i = load i32, ptr getelementptr inbounds ([128 x i32], ptr @columns, i32 0, i32 4) ; <i32> [#uses=1]
   %.pn.in.in.i = or i32 %.pn2.i, %.pn3.i          ; <i32> [#uses=1]
   %.pn.in.i = zext i32 %.pn.in.in.i to i64        ; <i64> [#uses=1]
   %.pn.i = shl i64 %.pn.in.i, 21                  ; <i64> [#uses=1]
@@ -67,19 +67,19 @@ entry:
   %18 = trunc i64 %17 to i32                      ; <i32> [#uses=1]
   %19 = urem i32 %16, 179                         ; <i32> [#uses=1]
   %20 = or i32 %19, 131072                        ; <i32> [#uses=1]
-  %21 = load i32*, i32** @ht, align 4                   ; <i32*> [#uses=1]
+  %21 = load ptr, ptr @ht, align 4                   ; <ptr> [#uses=1]
   br label %bb5
 
 bb:                                               ; preds = %bb5
-  %22 = getelementptr inbounds i32, i32* %21, i32 %x.0 ; <i32*> [#uses=1]
-  %23 = load i32, i32* %22, align 4                    ; <i32> [#uses=1]
+  %22 = getelementptr inbounds i32, ptr %21, i32 %x.0 ; <ptr> [#uses=1]
+  %23 = load i32, ptr %22, align 4                    ; <i32> [#uses=1]
   %24 = icmp eq i32 %23, %16                      ; <i1> [#uses=1]
   br i1 %24, label %bb1, label %bb2
 
 bb1:                                              ; preds = %bb
-  %25 = load i8*, i8** @he, align 4                    ; <i8*> [#uses=1]
-  %26 = getelementptr inbounds i8, i8* %25, i32 %x.0  ; <i8*> [#uses=1]
-  %27 = load i8, i8* %26, align 1                     ; <i8> [#uses=1]
+  %25 = load ptr, ptr @he, align 4                    ; <ptr> [#uses=1]
+  %26 = getelementptr inbounds i8, ptr %25, i32 %x.0  ; <ptr> [#uses=1]
+  %27 = load i8, ptr %26, align 1                     ; <i8> [#uses=1]
   %28 = sext i8 %27 to i32                        ; <i32> [#uses=1]
   ret i32 %28
 
@@ -101,4 +101,4 @@ bb7:                                              ; preds = %bb5
   ret i32 -128
 }
 
-declare noalias i8* @calloc(i32, i32) nounwind
+declare noalias ptr @calloc(i32, i32) nounwind

diff --git a/llvm/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll b/llvm/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll
index 04d46e60d7dbf..1cefa32b65bc4 100644
--- a/llvm/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll
@@ -2,26 +2,26 @@
 ; RUN: llc < %s -mtriple=thumbv7-eabi -mcpu=cortex-a8 -float-abi=hard -regalloc=basic | FileCheck %s
 ; PR5204
 
-%"struct.__gnu_cxx::__normal_iterator<char*,std::basic_string<char, std::char_traits<char>, std::allocator<char> > >" = type { i8* }
+%"struct.__gnu_cxx::__normal_iterator<char*,std::basic_string<char, std::char_traits<char>, std::allocator<char> > >" = type { ptr }
 %"struct.__gnu_cxx::new_allocator<char>" = type <{ i8 }>
 %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >" = type { %"struct.__gnu_cxx::__normal_iterator<char*,std::basic_string<char, std::char_traits<char>, std::allocator<char> > >" }
 %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >::_Rep" = type { %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >::_Rep_base" }
 %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >::_Rep_base" = type { i32, i32, i32 }
 
 
-define weak arm_aapcs_vfpcc i32 @_ZNKSs7compareERKSs(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this, %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) {
+define weak arm_aapcs_vfpcc i32 @_ZNKSs7compareERKSs(ptr %this, ptr %__str) {
 ; CHECK-LABEL: _ZNKSs7compareERKSs:
 ; CHECK:      it  eq
 ; CHECK-NEXT: subeq{{(.w)?}} r0, r{{[0-9]+}}, r{{[0-9]+}}
 ; CHECK-NEXT: pop.w
 entry:
-  %0 = tail call arm_aapcs_vfpcc  i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this) ; <i32> [#uses=3]
-  %1 = tail call arm_aapcs_vfpcc  i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) ; <i32> [#uses=3]
+  %0 = tail call arm_aapcs_vfpcc  i32 @_ZNKSs4sizeEv(ptr %this) ; <i32> [#uses=3]
+  %1 = tail call arm_aapcs_vfpcc  i32 @_ZNKSs4sizeEv(ptr %__str) ; <i32> [#uses=3]
   %2 = icmp ult i32 %1, %0                        ; <i1> [#uses=1]
   %3 = select i1 %2, i32 %1, i32 %0               ; <i32> [#uses=1]
-  %4 = tail call arm_aapcs_vfpcc  i8* @_ZNKSs7_M_dataEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this) ; <i8*> [#uses=1]
-  %5 = tail call arm_aapcs_vfpcc  i8* @_ZNKSs4dataEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) ; <i8*> [#uses=1]
-  %6 = tail call arm_aapcs_vfpcc  i32 @memcmp(i8* %4, i8* %5, i32 %3) nounwind readonly ; <i32> [#uses=2]
+  %4 = tail call arm_aapcs_vfpcc  ptr @_ZNKSs7_M_dataEv(ptr %this) ; <ptr> [#uses=1]
+  %5 = tail call arm_aapcs_vfpcc  ptr @_ZNKSs4dataEv(ptr %__str) ; <ptr> [#uses=1]
+  %6 = tail call arm_aapcs_vfpcc  i32 @memcmp(ptr %4, ptr %5, i32 %3) nounwind readonly ; <i32> [#uses=2]
   %7 = icmp eq i32 %6, 0                          ; <i1> [#uses=1]
   br i1 %7, label %bb, label %bb1
 
@@ -33,10 +33,10 @@ bb1:                                              ; preds = %entry
   ret i32 %6
 }
 
-declare arm_aapcs_vfpcc i32 @memcmp(i8* nocapture, i8* nocapture, i32) nounwind readonly
+declare arm_aapcs_vfpcc i32 @memcmp(ptr nocapture, ptr nocapture, i32) nounwind readonly
 
-declare arm_aapcs_vfpcc i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this)
+declare arm_aapcs_vfpcc i32 @_ZNKSs4sizeEv(ptr %this)
 
-declare arm_aapcs_vfpcc i8* @_ZNKSs7_M_dataEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this)
+declare arm_aapcs_vfpcc ptr @_ZNKSs7_M_dataEv(ptr %this)
 
-declare arm_aapcs_vfpcc i8* @_ZNKSs4dataEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this)
+declare arm_aapcs_vfpcc ptr @_ZNKSs4dataEv(ptr %this)

diff --git a/llvm/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll b/llvm/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll
index e283cb3434cc1..a42a2a3fb8289 100644
--- a/llvm/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll
@@ -1,13 +1,13 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin10
 
-%struct.OP = type { %struct.OP*, %struct.OP*, %struct.OP* ()*, i32, i16, i16, i8, i8 }
-%struct.SV = type { i8*, i32, i32 }
+%struct.OP = type { ptr, ptr, ptr, i32, i16, i16, i8, i8 }
+%struct.SV = type { ptr, i32, i32 }
 
-declare void @Perl_mg_set(%struct.SV*) nounwind
+declare void @Perl_mg_set(ptr) nounwind
 
-define %struct.OP* @Perl_pp_complement() nounwind {
+define ptr @Perl_pp_complement() nounwind {
 entry:
-  %0 = load %struct.SV*, %struct.SV** null, align 4            ; <%struct.SV*> [#uses=2]
+  %0 = load ptr, ptr null, align 4            ; <ptr> [#uses=2]
   br i1 undef, label %bb21, label %bb5
 
 bb5:                                              ; preds = %entry
@@ -17,54 +17,53 @@ bb6:                                              ; preds = %bb5
   br i1 undef, label %bb8, label %bb7
 
 bb7:                                              ; preds = %bb6
-  %1 = getelementptr inbounds %struct.SV, %struct.SV* %0, i32 0, i32 0 ; <i8**> [#uses=1]
-  %2 = load i8*, i8** %1, align 4                      ; <i8*> [#uses=1]
-  %3 = getelementptr inbounds i8, i8* %2, i32 12      ; <i8*> [#uses=1]
-  %4 = bitcast i8* %3 to i32*                     ; <i32*> [#uses=1]
-  %5 = load i32, i32* %4, align 4                      ; <i32> [#uses=1]
-  %storemerge5 = xor i32 %5, -1                   ; <i32> [#uses=1]
-  call  void @Perl_sv_setiv(%struct.SV* undef, i32 %storemerge5) nounwind
-  %6 = getelementptr inbounds %struct.SV, %struct.SV* undef, i32 0, i32 2 ; <i32*> [#uses=1]
-  %7 = load i32, i32* %6, align 4                      ; <i32> [#uses=1]
-  %8 = and i32 %7, 16384                          ; <i32> [#uses=1]
-  %9 = icmp eq i32 %8, 0                          ; <i1> [#uses=1]
-  br i1 %9, label %bb12, label %bb11
+  %1 = getelementptr inbounds %struct.SV, ptr %0, i32 0, i32 0 ; <ptr> [#uses=1]
+  %2 = load ptr, ptr %1, align 4                      ; <ptr> [#uses=1]
+  %3 = getelementptr inbounds i8, ptr %2, i32 12      ; <ptr> [#uses=1]
+  %4 = load i32, ptr %3, align 4                      ; <i32> [#uses=1]
+  %storemerge5 = xor i32 %4, -1                   ; <i32> [#uses=1]
+  call  void @Perl_sv_setiv(ptr undef, i32 %storemerge5) nounwind
+  %5 = getelementptr inbounds %struct.SV, ptr undef, i32 0, i32 2 ; <ptr> [#uses=1]
+  %6 = load i32, ptr %5, align 4                      ; <i32> [#uses=1]
+  %7 = and i32 %6, 16384                          ; <i32> [#uses=1]
+  %8 = icmp eq i32 %7, 0                          ; <i1> [#uses=1]
+  br i1 %8, label %bb12, label %bb11
 
 bb8:                                              ; preds = %bb6
   unreachable
 
 bb11:                                             ; preds = %bb7
-  call  void @Perl_mg_set(%struct.SV* undef) nounwind
+  call  void @Perl_mg_set(ptr undef) nounwind
   br label %bb12
 
 bb12:                                             ; preds = %bb11, %bb7
-  store %struct.SV* undef, %struct.SV** null, align 4
+  store ptr undef, ptr null, align 4
   br label %bb44
 
 bb13:                                             ; preds = %bb5
-  %10 = call  i32 @Perl_sv_2uv(%struct.SV* %0) nounwind ; <i32> [#uses=0]
+  %9 = call  i32 @Perl_sv_2uv(ptr %0) nounwind ; <i32> [#uses=0]
   br i1 undef, label %bb.i, label %bb1.i
 
 bb.i:                                             ; preds = %bb13
-  call  void @Perl_sv_setiv(%struct.SV* undef, i32 undef) nounwind
+  call  void @Perl_sv_setiv(ptr undef, i32 undef) nounwind
   br label %Perl_sv_setuv.exit
 
 bb1.i:                                            ; preds = %bb13
   br label %Perl_sv_setuv.exit
 
 Perl_sv_setuv.exit:                               ; preds = %bb1.i, %bb.i
-  %11 = getelementptr inbounds %struct.SV, %struct.SV* undef, i32 0, i32 2 ; <i32*> [#uses=1]
-  %12 = load i32, i32* %11, align 4                    ; <i32> [#uses=1]
-  %13 = and i32 %12, 16384                        ; <i32> [#uses=1]
-  %14 = icmp eq i32 %13, 0                        ; <i1> [#uses=1]
-  br i1 %14, label %bb20, label %bb19
+  %10 = getelementptr inbounds %struct.SV, ptr undef, i32 0, i32 2 ; <ptr> [#uses=1]
+  %11 = load i32, ptr %10, align 4                    ; <i32> [#uses=1]
+  %12 = and i32 %11, 16384                        ; <i32> [#uses=1]
+  %13 = icmp eq i32 %12, 0                        ; <i1> [#uses=1]
+  br i1 %13, label %bb20, label %bb19
 
 bb19:                                             ; preds = %Perl_sv_setuv.exit
-  call  void @Perl_mg_set(%struct.SV* undef) nounwind
+  call  void @Perl_mg_set(ptr undef) nounwind
   br label %bb20
 
 bb20:                                             ; preds = %bb19, %Perl_sv_setuv.exit
-  store %struct.SV* undef, %struct.SV** null, align 4
+  store ptr undef, ptr null, align 4
   br label %bb44
 
 bb21:                                             ; preds = %entry
@@ -77,9 +76,9 @@ bb23:                                             ; preds = %bb21
   unreachable
 
 bb44:                                             ; preds = %bb20, %bb12
-  ret %struct.OP* undef
+  ret ptr undef
 }
 
-declare void @Perl_sv_setiv(%struct.SV*, i32) nounwind
+declare void @Perl_sv_setiv(ptr, i32) nounwind
 
-declare i32 @Perl_sv_2uv(%struct.SV*) nounwind
+declare i32 @Perl_sv_2uv(ptr) nounwind

diff --git a/llvm/test/CodeGen/Thumb2/2009-11-13-STRDBug.ll b/llvm/test/CodeGen/Thumb2/2009-11-13-STRDBug.ll
index 0c9fa5efa0bbb..a98a99430d12e 100644
--- a/llvm/test/CodeGen/Thumb2/2009-11-13-STRDBug.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-11-13-STRDBug.ll
@@ -1,20 +1,20 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin10
 ; rdar://7394794
 
-define void @lshift_double(i64 %l1, i64 %h1, i64 %count, i32 %prec, i64* nocapture %lv, i64* nocapture %hv, i32 %arith) nounwind {
+define void @lshift_double(i64 %l1, i64 %h1, i64 %count, i32 %prec, ptr nocapture %lv, ptr nocapture %hv, i32 %arith) nounwind {
 entry:
   %..i = select i1 false, i64 0, i64 0            ; <i64> [#uses=1]
   br i1 undef, label %bb11.i, label %bb6.i
 
 bb6.i:                                            ; preds = %entry
   %0 = lshr i64 %h1, 0                            ; <i64> [#uses=1]
-  store i64 %0, i64* %hv, align 4
+  store i64 %0, ptr %hv, align 4
   %1 = lshr i64 %l1, 0                            ; <i64> [#uses=1]
   %2 = or i64 0, %1                               ; <i64> [#uses=1]
-  store i64 %2, i64* %lv, align 4
+  store i64 %2, ptr %lv, align 4
   br label %bb11.i
 
 bb11.i:                                           ; preds = %bb6.i, %entry
-  store i64 %..i, i64* %lv, align 4
+  store i64 %..i, ptr %lv, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll b/llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
index b4248b81748d6..0057242eafb05 100644
--- a/llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
@@ -3,7 +3,7 @@
 
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
 
-define void @fred(i32 %three_by_three, i8* %in, double %dt1, i32 %x_size, i32 %y_size, i8* %bp) nounwind {
+define void @fred(i32 %three_by_three, ptr %in, double %dt1, i32 %x_size, i32 %y_size, ptr %bp) nounwind {
 entry:
 ; -- The loop following the load should only use a single add-literation
 ;    instruction.
@@ -12,108 +12,108 @@ entry:
 ; CHECK: subsections_via_symbols
 
 
-  %three_by_three_addr = alloca i32               ; <i32*> [#uses=2]
-  %in_addr = alloca i8*                           ; <i8**> [#uses=2]
-  %dt_addr = alloca float                         ; <float*> [#uses=4]
-  %x_size_addr = alloca i32                       ; <i32*> [#uses=2]
-  %y_size_addr = alloca i32                       ; <i32*> [#uses=1]
-  %bp_addr = alloca i8*                           ; <i8**> [#uses=1]
-  %tmp_image = alloca i8*                         ; <i8**> [#uses=0]
-  %out = alloca i8*                               ; <i8**> [#uses=1]
-  %cp = alloca i8*                                ; <i8**> [#uses=0]
-  %dpt = alloca i8*                               ; <i8**> [#uses=4]
-  %dp = alloca i8*                                ; <i8**> [#uses=2]
-  %ip = alloca i8*                                ; <i8**> [#uses=0]
-  %centre = alloca i32                            ; <i32*> [#uses=0]
-  %tmp = alloca i32                               ; <i32*> [#uses=0]
-  %brightness = alloca i32                        ; <i32*> [#uses=0]
-  %area = alloca i32                              ; <i32*> [#uses=0]
-  %y = alloca i32                                 ; <i32*> [#uses=0]
-  %x = alloca i32                                 ; <i32*> [#uses=2]
-  %j = alloca i32                                 ; <i32*> [#uses=6]
-  %i = alloca i32                                 ; <i32*> [#uses=1]
-  %mask_size = alloca i32                         ; <i32*> [#uses=5]
-  %increment = alloca i32                         ; <i32*> [#uses=1]
-  %n_max = alloca i32                             ; <i32*> [#uses=4]
-  %temp = alloca float                            ; <float*> [#uses=1]
+  %three_by_three_addr = alloca i32               ; <ptr> [#uses=2]
+  %in_addr = alloca ptr                           ; <ptr> [#uses=2]
+  %dt_addr = alloca float                         ; <ptr> [#uses=4]
+  %x_size_addr = alloca i32                       ; <ptr> [#uses=2]
+  %y_size_addr = alloca i32                       ; <ptr> [#uses=1]
+  %bp_addr = alloca ptr                           ; <ptr> [#uses=1]
+  %tmp_image = alloca ptr                         ; <ptr> [#uses=0]
+  %out = alloca ptr                               ; <ptr> [#uses=1]
+  %cp = alloca ptr                                ; <ptr> [#uses=0]
+  %dpt = alloca ptr                               ; <ptr> [#uses=4]
+  %dp = alloca ptr                                ; <ptr> [#uses=2]
+  %ip = alloca ptr                                ; <ptr> [#uses=0]
+  %centre = alloca i32                            ; <ptr> [#uses=0]
+  %tmp = alloca i32                               ; <ptr> [#uses=0]
+  %brightness = alloca i32                        ; <ptr> [#uses=0]
+  %area = alloca i32                              ; <ptr> [#uses=0]
+  %y = alloca i32                                 ; <ptr> [#uses=0]
+  %x = alloca i32                                 ; <ptr> [#uses=2]
+  %j = alloca i32                                 ; <ptr> [#uses=6]
+  %i = alloca i32                                 ; <ptr> [#uses=1]
+  %mask_size = alloca i32                         ; <ptr> [#uses=5]
+  %increment = alloca i32                         ; <ptr> [#uses=1]
+  %n_max = alloca i32                             ; <ptr> [#uses=4]
+  %temp = alloca float                            ; <ptr> [#uses=1]
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
-  store i32 %three_by_three, i32* %three_by_three_addr
-  store i8* %in, i8** %in_addr
+  store i32 %three_by_three, ptr %three_by_three_addr
+  store ptr %in, ptr %in_addr
   %dt = fptrunc double %dt1 to float              ; <float> [#uses=1]
-  store float %dt, float* %dt_addr
-  store i32 %x_size, i32* %x_size_addr
-  store i32 %y_size, i32* %y_size_addr
-  store i8* %bp, i8** %bp_addr
-  %0 = load i8*, i8** %in_addr, align 4                ; <i8*> [#uses=1]
-  store i8* %0, i8** %out, align 4
+  store float %dt, ptr %dt_addr
+  store i32 %x_size, ptr %x_size_addr
+  store i32 %y_size, ptr %y_size_addr
+  store ptr %bp, ptr %bp_addr
+  %0 = load ptr, ptr %in_addr, align 4                ; <ptr> [#uses=1]
+  store ptr %0, ptr %out, align 4
   %1 = call  i32 (...) @foo() nounwind ; <i32> [#uses=1]
-  store i32 %1, i32* %i, align 4
-  %2 = load i32, i32* %three_by_three_addr, align 4    ; <i32> [#uses=1]
+  store i32 %1, ptr %i, align 4
+  %2 = load i32, ptr %three_by_three_addr, align 4    ; <i32> [#uses=1]
   %3 = icmp eq i32 %2, 0                          ; <i1> [#uses=1]
   br i1 %3, label %bb, label %bb2
 
 bb:                                               ; preds = %entry
-  %4 = load float, float* %dt_addr, align 4              ; <float> [#uses=1]
+  %4 = load float, ptr %dt_addr, align 4              ; <float> [#uses=1]
   %5 = fpext float %4 to double                   ; <double> [#uses=1]
   %6 = fmul double %5, 1.500000e+00               ; <double> [#uses=1]
   %7 = fptosi double %6 to i32                    ; <i32> [#uses=1]
   %8 = add nsw i32 %7, 1                          ; <i32> [#uses=1]
-  store i32 %8, i32* %mask_size, align 4
+  store i32 %8, ptr %mask_size, align 4
   br label %bb3
 
 bb2:                                              ; preds = %entry
-  store i32 1, i32* %mask_size, align 4
+  store i32 1, ptr %mask_size, align 4
   br label %bb3
 
 bb3:                                              ; preds = %bb2, %bb
-  %9 = load i32, i32* %mask_size, align 4              ; <i32> [#uses=1]
+  %9 = load i32, ptr %mask_size, align 4              ; <i32> [#uses=1]
   %10 = mul i32 %9, 2                             ; <i32> [#uses=1]
   %11 = add nsw i32 %10, 1                        ; <i32> [#uses=1]
-  store i32 %11, i32* %n_max, align 4
-  %12 = load i32, i32* %x_size_addr, align 4           ; <i32> [#uses=1]
-  %13 = load i32, i32* %n_max, align 4                 ; <i32> [#uses=1]
+  store i32 %11, ptr %n_max, align 4
+  %12 = load i32, ptr %x_size_addr, align 4           ; <i32> [#uses=1]
+  %13 = load i32, ptr %n_max, align 4                 ; <i32> [#uses=1]
   %14 = sub i32 %12, %13                          ; <i32> [#uses=1]
-  store i32 %14, i32* %increment, align 4
-  %15 = load i32, i32* %n_max, align 4                 ; <i32> [#uses=1]
-  %16 = load i32, i32* %n_max, align 4                 ; <i32> [#uses=1]
+  store i32 %14, ptr %increment, align 4
+  %15 = load i32, ptr %n_max, align 4                 ; <i32> [#uses=1]
+  %16 = load i32, ptr %n_max, align 4                 ; <i32> [#uses=1]
   %17 = mul i32 %15, %16                          ; <i32> [#uses=1]
-  %18 = call  noalias i8* @malloc(i32 %17) nounwind ; <i8*> [#uses=1]
-  store i8* %18, i8** %dp, align 4
-  %19 = load i8*, i8** %dp, align 4                    ; <i8*> [#uses=1]
-  store i8* %19, i8** %dpt, align 4
-  %20 = load float, float* %dt_addr, align 4             ; <float> [#uses=1]
-  %21 = load float, float* %dt_addr, align 4             ; <float> [#uses=1]
+  %18 = call  noalias ptr @malloc(i32 %17) nounwind ; <ptr> [#uses=1]
+  store ptr %18, ptr %dp, align 4
+  %19 = load ptr, ptr %dp, align 4                    ; <ptr> [#uses=1]
+  store ptr %19, ptr %dpt, align 4
+  %20 = load float, ptr %dt_addr, align 4             ; <float> [#uses=1]
+  %21 = load float, ptr %dt_addr, align 4             ; <float> [#uses=1]
   %22 = fmul float %20, %21                       ; <float> [#uses=1]
   %23 = fsub float -0.000000e+00, %22             ; <float> [#uses=1]
-  store float %23, float* %temp, align 4
-  %24 = load i32, i32* %mask_size, align 4             ; <i32> [#uses=1]
+  store float %23, ptr %temp, align 4
+  %24 = load i32, ptr %mask_size, align 4             ; <i32> [#uses=1]
   %25 = sub i32 0, %24                            ; <i32> [#uses=1]
-  store i32 %25, i32* %j, align 4
+  store i32 %25, ptr %j, align 4
   br label %bb5
 
 bb4:                                              ; preds = %bb5
-  %26 = load i32, i32* %j, align 4                     ; <i32> [#uses=1]
-  %27 = load i32, i32* %j, align 4                     ; <i32> [#uses=1]
+  %26 = load i32, ptr %j, align 4                     ; <i32> [#uses=1]
+  %27 = load i32, ptr %j, align 4                     ; <i32> [#uses=1]
   %28 = mul i32 %26, %27                          ; <i32> [#uses=1]
   %29 = sitofp i32 %28 to double                  ; <double> [#uses=1]
   %30 = fmul double %29, 1.234000e+00             ; <double> [#uses=1]
   %31 = fptosi double %30 to i32                  ; <i32> [#uses=1]
-  store i32 %31, i32* %x, align 4
-  %32 = load i32, i32* %x, align 4                     ; <i32> [#uses=1]
+  store i32 %31, ptr %x, align 4
+  %32 = load i32, ptr %x, align 4                     ; <i32> [#uses=1]
   %33 = trunc i32 %32 to i8                       ; <i8> [#uses=1]
-  %34 = load i8*, i8** %dpt, align 4                   ; <i8*> [#uses=1]
-  store i8 %33, i8* %34, align 1
-  %35 = load i8*, i8** %dpt, align 4                   ; <i8*> [#uses=1]
-  %36 = getelementptr inbounds i8, i8* %35, i64 1     ; <i8*> [#uses=1]
-  store i8* %36, i8** %dpt, align 4
-  %37 = load i32, i32* %j, align 4                     ; <i32> [#uses=1]
+  %34 = load ptr, ptr %dpt, align 4                   ; <ptr> [#uses=1]
+  store i8 %33, ptr %34, align 1
+  %35 = load ptr, ptr %dpt, align 4                   ; <ptr> [#uses=1]
+  %36 = getelementptr inbounds i8, ptr %35, i64 1     ; <ptr> [#uses=1]
+  store ptr %36, ptr %dpt, align 4
+  %37 = load i32, ptr %j, align 4                     ; <i32> [#uses=1]
   %38 = add nsw i32 %37, 1                        ; <i32> [#uses=1]
-  store i32 %38, i32* %j, align 4
+  store i32 %38, ptr %j, align 4
   br label %bb5
 
 bb5:                                              ; preds = %bb4, %bb3
-  %39 = load i32, i32* %j, align 4                     ; <i32> [#uses=1]
-  %40 = load i32, i32* %mask_size, align 4             ; <i32> [#uses=1]
+  %39 = load i32, ptr %j, align 4                     ; <i32> [#uses=1]
+  %40 = load i32, ptr %mask_size, align 4             ; <i32> [#uses=1]
   %41 = icmp sle i32 %39, %40                     ; <i1> [#uses=1]
   br i1 %41, label %bb4, label %bb6
 
@@ -126,4 +126,4 @@ return:                                           ; preds = %bb6
 
 declare i32 @foo(...)
 
-declare noalias i8* @malloc(i32) nounwind
+declare noalias ptr @malloc(i32) nounwind

diff --git a/llvm/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll b/llvm/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll
index 1b0b91e86cf21..f82e3c150235e 100644
--- a/llvm/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll
+++ b/llvm/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll
@@ -6,84 +6,83 @@ target triple = "thumbv7-apple-darwin10"
 
 %struct.PlatformMutex = type { i32, [40 x i8] }
 %struct.SpinLock = type { %struct.PlatformMutex }
-%"struct.WTF::TCMalloc_ThreadCache" = type { i32, %struct._opaque_pthread_t*, i8, [68 x %"struct.WTF::TCMalloc_ThreadCache_FreeList"], i32, i32, %"struct.WTF::TCMalloc_ThreadCache"*, %"struct.WTF::TCMalloc_ThreadCache"* }
-%"struct.WTF::TCMalloc_ThreadCache_FreeList" = type { i8*, i16, i16 }
-%struct.__darwin_pthread_handler_rec = type { void (i8*)*, i8*, %struct.__darwin_pthread_handler_rec* }
-%struct._opaque_pthread_t = type { i32, %struct.__darwin_pthread_handler_rec*, [596 x i8] }
-
-@_ZN3WTFL8heap_keyE = internal global i32 0       ; <i32*> [#uses=1]
-@_ZN3WTFL10tsd_initedE.b = internal global i1 false ; <i1*> [#uses=2]
-@_ZN3WTFL13pageheap_lockE = internal global %struct.SpinLock { %struct.PlatformMutex { i32 850045863, [40 x i8] zeroinitializer } } ; <%struct.SpinLock*> [#uses=1]
-@_ZN3WTFL12thread_heapsE = internal global %"struct.WTF::TCMalloc_ThreadCache"* null ; <%"struct.WTF::TCMalloc_ThreadCache"**> [#uses=1]
-@llvm.used = appending global [1 x i8*] [i8* bitcast (%"struct.WTF::TCMalloc_ThreadCache"* ()* @_ZN3WTF20TCMalloc_ThreadCache22CreateCacheIfNecessaryEv to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define %"struct.WTF::TCMalloc_ThreadCache"* @_ZN3WTF20TCMalloc_ThreadCache22CreateCacheIfNecessaryEv() nounwind {
+%"struct.WTF::TCMalloc_ThreadCache" = type { i32, ptr, i8, [68 x %"struct.WTF::TCMalloc_ThreadCache_FreeList"], i32, i32, ptr, ptr }
+%"struct.WTF::TCMalloc_ThreadCache_FreeList" = type { ptr, i16, i16 }
+%struct.__darwin_pthread_handler_rec = type { ptr, ptr, ptr }
+%struct._opaque_pthread_t = type { i32, ptr, [596 x i8] }
+
+@_ZN3WTFL8heap_keyE = internal global i32 0       ; <ptr> [#uses=1]
+@_ZN3WTFL10tsd_initedE.b = internal global i1 false ; <ptr> [#uses=2]
+@_ZN3WTFL13pageheap_lockE = internal global %struct.SpinLock { %struct.PlatformMutex { i32 850045863, [40 x i8] zeroinitializer } } ; <ptr> [#uses=1]
+@_ZN3WTFL12thread_heapsE = internal global ptr null ; <ptr> [#uses=1]
+@llvm.used = appending global [1 x ptr] [ptr @_ZN3WTF20TCMalloc_ThreadCache22CreateCacheIfNecessaryEv], section "llvm.metadata" ; <ptr> [#uses=0]
+
+define ptr @_ZN3WTF20TCMalloc_ThreadCache22CreateCacheIfNecessaryEv() nounwind {
 entry:
-  %0 = tail call  i32 @pthread_mutex_lock(%struct.PlatformMutex* getelementptr inbounds (%struct.SpinLock, %struct.SpinLock* @_ZN3WTFL13pageheap_lockE, i32 0, i32 0)) nounwind
-  %.b24 = load i1, i1* @_ZN3WTFL10tsd_initedE.b, align 4 ; <i1> [#uses=1]
+  %0 = tail call  i32 @pthread_mutex_lock(ptr @_ZN3WTFL13pageheap_lockE) nounwind
+  %.b24 = load i1, ptr @_ZN3WTFL10tsd_initedE.b, align 4 ; <i1> [#uses=1]
   br i1 %.b24, label %bb5, label %bb6
 
 bb5:                                              ; preds = %entry
-  %1 = tail call  %struct._opaque_pthread_t* @pthread_self() nounwind
+  %1 = tail call  ptr @pthread_self() nounwind
   br label %bb6
 
 bb6:                                              ; preds = %bb5, %entry
-  %me.0 = phi %struct._opaque_pthread_t* [ %1, %bb5 ], [ null, %entry ] ; <%struct._opaque_pthread_t*> [#uses=2]
+  %me.0 = phi ptr [ %1, %bb5 ], [ null, %entry ] ; <ptr> [#uses=2]
   br label %bb11
 
 bb7:                                              ; preds = %bb11
-  %2 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache", %"struct.WTF::TCMalloc_ThreadCache"* %h.0, i32 0, i32 1
-  %3 = load %struct._opaque_pthread_t*, %struct._opaque_pthread_t** %2, align 4
-  %4 = tail call  i32 @pthread_equal(%struct._opaque_pthread_t* %3, %struct._opaque_pthread_t* %me.0) nounwind
+  %2 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache", ptr %h.0, i32 0, i32 1
+  %3 = load ptr, ptr %2, align 4
+  %4 = tail call  i32 @pthread_equal(ptr %3, ptr %me.0) nounwind
   %5 = icmp eq i32 %4, 0
   br i1 %5, label %bb10, label %bb14
 
 bb10:                                             ; preds = %bb7
-  %6 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache", %"struct.WTF::TCMalloc_ThreadCache"* %h.0, i32 0, i32 6
+  %6 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache", ptr %h.0, i32 0, i32 6
   br label %bb11
 
 bb11:                                             ; preds = %bb10, %bb6
-  %h.0.in = phi %"struct.WTF::TCMalloc_ThreadCache"** [ @_ZN3WTFL12thread_heapsE, %bb6 ], [ %6, %bb10 ] ; <%"struct.WTF::TCMalloc_ThreadCache"**> [#uses=1]
-  %h.0 = load %"struct.WTF::TCMalloc_ThreadCache"*, %"struct.WTF::TCMalloc_ThreadCache"** %h.0.in, align 4 ; <%"struct.WTF::TCMalloc_ThreadCache"*> [#uses=4]
-  %7 = icmp eq %"struct.WTF::TCMalloc_ThreadCache"* %h.0, null
+  %h.0.in = phi ptr [ @_ZN3WTFL12thread_heapsE, %bb6 ], [ %6, %bb10 ] ; <ptr> [#uses=1]
+  %h.0 = load ptr, ptr %h.0.in, align 4 ; <ptr> [#uses=4]
+  %7 = icmp eq ptr %h.0, null
   br i1 %7, label %bb13, label %bb7
 
 bb13:                                             ; preds = %bb11
-  %8 = tail call  %"struct.WTF::TCMalloc_ThreadCache"* @_ZN3WTF20TCMalloc_ThreadCache7NewHeapEP17_opaque_pthread_t(%struct._opaque_pthread_t* %me.0) nounwind
+  %8 = tail call  ptr @_ZN3WTF20TCMalloc_ThreadCache7NewHeapEP17_opaque_pthread_t(ptr %me.0) nounwind
   br label %bb14
 
 bb14:                                             ; preds = %bb13, %bb7
-  %heap.1 = phi %"struct.WTF::TCMalloc_ThreadCache"* [ %8, %bb13 ], [ %h.0, %bb7 ] ; <%"struct.WTF::TCMalloc_ThreadCache"*> [#uses=4]
-  %9 = tail call  i32 @pthread_mutex_unlock(%struct.PlatformMutex* getelementptr inbounds (%struct.SpinLock, %struct.SpinLock* @_ZN3WTFL13pageheap_lockE, i32 0, i32 0)) nounwind
-  %10 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache", %"struct.WTF::TCMalloc_ThreadCache"* %heap.1, i32 0, i32 2
-  %11 = load i8, i8* %10, align 4
+  %heap.1 = phi ptr [ %8, %bb13 ], [ %h.0, %bb7 ] ; <ptr> [#uses=4]
+  %9 = tail call  i32 @pthread_mutex_unlock(ptr @_ZN3WTFL13pageheap_lockE) nounwind
+  %10 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache", ptr %heap.1, i32 0, i32 2
+  %11 = load i8, ptr %10, align 4
   %toBool15not = icmp eq i8 %11, 0                ; <i1> [#uses=1]
   br i1 %toBool15not, label %bb19, label %bb22
 
 bb19:                                             ; preds = %bb14
-  %.b = load i1, i1* @_ZN3WTFL10tsd_initedE.b, align 4 ; <i1> [#uses=1]
+  %.b = load i1, ptr @_ZN3WTFL10tsd_initedE.b, align 4 ; <i1> [#uses=1]
   br i1 %.b, label %bb21, label %bb22
 
 bb21:                                             ; preds = %bb19
-  store i8 1, i8* %10, align 4
-  %12 = load i32, i32* @_ZN3WTFL8heap_keyE, align 4
-  %13 = bitcast %"struct.WTF::TCMalloc_ThreadCache"* %heap.1 to i8*
-  %14 = tail call  i32 @pthread_setspecific(i32 %12, i8* %13) nounwind
-  ret %"struct.WTF::TCMalloc_ThreadCache"* %heap.1
+  store i8 1, ptr %10, align 4
+  %12 = load i32, ptr @_ZN3WTFL8heap_keyE, align 4
+  %13 = tail call  i32 @pthread_setspecific(i32 %12, ptr %heap.1) nounwind
+  ret ptr %heap.1
 
 bb22:                                             ; preds = %bb19, %bb14
-  ret %"struct.WTF::TCMalloc_ThreadCache"* %heap.1
+  ret ptr %heap.1
 }
 
-declare i32 @pthread_mutex_lock(%struct.PlatformMutex*)
+declare i32 @pthread_mutex_lock(ptr)
 
-declare i32 @pthread_mutex_unlock(%struct.PlatformMutex*)
+declare i32 @pthread_mutex_unlock(ptr)
 
-declare hidden %"struct.WTF::TCMalloc_ThreadCache"* @_ZN3WTF20TCMalloc_ThreadCache7NewHeapEP17_opaque_pthread_t(%struct._opaque_pthread_t*) nounwind
+declare hidden ptr @_ZN3WTF20TCMalloc_ThreadCache7NewHeapEP17_opaque_pthread_t(ptr) nounwind
 
-declare i32 @pthread_setspecific(i32, i8*)
+declare i32 @pthread_setspecific(i32, ptr)
 
-declare %struct._opaque_pthread_t* @pthread_self()
+declare ptr @pthread_self()
 
-declare i32 @pthread_equal(%struct._opaque_pthread_t*, %struct._opaque_pthread_t*)
+declare i32 @pthread_equal(ptr, ptr)
 

diff --git a/llvm/test/CodeGen/Thumb2/2010-01-19-RemovePredicates.ll b/llvm/test/CodeGen/Thumb2/2010-01-19-RemovePredicates.ll
index a20d36ba5ed32..a07f1e6610edc 100644
--- a/llvm/test/CodeGen/Thumb2/2010-01-19-RemovePredicates.ll
+++ b/llvm/test/CodeGen/Thumb2/2010-01-19-RemovePredicates.ll
@@ -6,16 +6,15 @@
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
 target triple = "thumbv7-apple-darwin10"
 
-declare void @etoe53(i16* nocapture, i16* nocapture) nounwind
+declare void @etoe53(ptr nocapture, ptr nocapture) nounwind
 
-define void @earith(double* nocapture %value, i32 %icode, double* nocapture %r1, double* nocapture %r2) nounwind {
+define void @earith(ptr nocapture %value, i32 %icode, ptr nocapture %r1, ptr nocapture %r2) nounwind {
 entry:
-  %v = alloca [6 x i16], align 4                  ; <[6 x i16]*> [#uses=1]
+  %v = alloca [6 x i16], align 4                  ; <ptr> [#uses=1]
   br i1 undef, label %bb2.i, label %bb5
 
 bb2.i:                                            ; preds = %entry
-  %0 = bitcast double* %value to i16*             ; <i16*> [#uses=1]
-  call  void @etoe53(i16* null, i16* %0) nounwind
+  call  void @etoe53(ptr null, ptr %value) nounwind
   ret void
 
 bb5:                                              ; preds = %entry
@@ -46,8 +45,7 @@ bb35:                                             ; preds = %bb5
   unreachable
 
 bb46:                                             ; preds = %bb26, %bb10
-  %1 = bitcast double* %value to i16*             ; <i16*> [#uses=1]
-  %v47 = getelementptr inbounds [6 x i16], [6 x i16]* %v, i32 0, i32 0 ; <i16*> [#uses=1]
-  call  void @etoe53(i16* %v47, i16* %1) nounwind
+  %v47 = getelementptr inbounds [6 x i16], ptr %v, i32 0, i32 0 ; <ptr> [#uses=1]
+  call  void @etoe53(ptr %v47, ptr %value) nounwind
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll b/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll
index 86e434a304334..aca29e6715521 100644
--- a/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll
+++ b/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll
@@ -4,12 +4,12 @@
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
 target triple = "thumbv7-apple-darwin3.0.0-iphoneos"
 
-define void @FindMin(double* %panelTDEL, i8* %dclOfRow, i32 %numRows, i32 %numCols, double* %retMin_RES_TDEL) {
+define void @FindMin(ptr %panelTDEL, ptr %dclOfRow, i32 %numRows, i32 %numCols, ptr %retMin_RES_TDEL) {
 entry:
-  %panelTDEL.addr = alloca double*, align 4       ; <double**> [#uses=1]
-  %panelResTDEL = alloca [2560 x double], align 4 ; <[2560 x double]*> [#uses=0]
-  store double* %panelTDEL, double** %panelTDEL.addr
-  store double* %retMin_RES_TDEL, double** undef
-  store i32 0, i32* undef
+  %panelTDEL.addr = alloca ptr, align 4       ; <ptr> [#uses=1]
+  %panelResTDEL = alloca [2560 x double], align 4 ; <ptr> [#uses=0]
+  store ptr %panelTDEL, ptr %panelTDEL.addr
+  store ptr %retMin_RES_TDEL, ptr undef
+  store i32 0, ptr undef
   unreachable
 }

diff --git a/llvm/test/CodeGen/Thumb2/2010-03-08-addi12-ccout.ll b/llvm/test/CodeGen/Thumb2/2010-03-08-addi12-ccout.ll
index 7d19d15f2a306..53f4f334f17c5 100644
--- a/llvm/test/CodeGen/Thumb2/2010-03-08-addi12-ccout.ll
+++ b/llvm/test/CodeGen/Thumb2/2010-03-08-addi12-ccout.ll
@@ -1,19 +1,19 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin
 
-@.str41196 = external constant [2 x i8], align 4  ; <[2 x i8]*> [#uses=1]
+@.str41196 = external constant [2 x i8], align 4  ; <ptr> [#uses=1]
 
 declare void @syStopraw(i32) nounwind
 
-declare i32 @SyFopen(i8*, i8*) nounwind
+declare i32 @SyFopen(ptr, ptr) nounwind
 
-declare i8* @SyFgets(i8*, i32) nounwind
+declare ptr @SyFgets(ptr, i32) nounwind
 
-define void @SyHelp(i8* nocapture %topic, i32 %fin) nounwind {
+define void @SyHelp(ptr nocapture %topic, i32 %fin) nounwind {
 entry:
-  %line = alloca [256 x i8], align 4              ; <[256 x i8]*> [#uses=1]
-  %secname = alloca [1024 x i8], align 4          ; <[1024 x i8]*> [#uses=0]
-  %last = alloca [256 x i8], align 4              ; <[256 x i8]*> [#uses=1]
-  %last2 = alloca [256 x i8], align 4             ; <[256 x i8]*> [#uses=1]
+  %line = alloca [256 x i8], align 4              ; <ptr> [#uses=1]
+  %secname = alloca [1024 x i8], align 4          ; <ptr> [#uses=0]
+  %last = alloca [256 x i8], align 4              ; <ptr> [#uses=1]
+  %last2 = alloca [256 x i8], align 4             ; <ptr> [#uses=1]
   br i1 undef, label %bb, label %bb2
 
 bb:                                               ; preds = %entry
@@ -70,7 +70,7 @@ bb163:                                            ; preds = %bb162, %bb161
   unreachable
 
 bb224:                                            ; preds = %bb162
-  %0 = call  i32 @SyFopen(i8* undef, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str41196, i32 0, i32 0)) nounwind ; <i32> [#uses=2]
+  %0 = call  i32 @SyFopen(ptr undef, ptr @.str41196) nounwind ; <i32> [#uses=2]
   br i1 false, label %bb297, label %bb300
 
 bb297:                                            ; preds = %bb224
@@ -138,8 +138,8 @@ bb345:                                            ; preds = %bb345, %bb339
   %4 = phi i8 [ %5, %bb345 ], [ undef, %bb339 ]   ; <i8> [#uses=0]
   %indvar670 = phi i32 [ %tmp673, %bb345 ], [ 0, %bb339 ] ; <i32> [#uses=1]
   %tmp673 = add i32 %indvar670, 1                 ; <i32> [#uses=2]
-  %scevgep674 = getelementptr [256 x i8], [256 x i8]* %last, i32 0, i32 %tmp673 ; <i8*> [#uses=1]
-  %5 = load i8, i8* %scevgep674, align 1              ; <i8> [#uses=1]
+  %scevgep674 = getelementptr [256 x i8], ptr %last, i32 0, i32 %tmp673 ; <ptr> [#uses=1]
+  %5 = load i8, ptr %scevgep674, align 1              ; <i8> [#uses=1]
   br i1 undef, label %bb347, label %bb345
 
 bb347:                                            ; preds = %bb345
@@ -166,8 +166,8 @@ bb362:                                            ; preds = %bb361
 bb366:                                            ; preds = %bb366, %bb360
   %indvar662 = phi i32 [ %tmp665, %bb366 ], [ 0, %bb360 ] ; <i32> [#uses=1]
   %tmp665 = add i32 %indvar662, 1                 ; <i32> [#uses=2]
-  %scevgep666 = getelementptr [256 x i8], [256 x i8]* %last2, i32 0, i32 %tmp665 ; <i8*> [#uses=1]
-  %6 = load i8, i8* %scevgep666, align 1              ; <i8> [#uses=0]
+  %scevgep666 = getelementptr [256 x i8], ptr %last2, i32 0, i32 %tmp665 ; <ptr> [#uses=1]
+  %6 = load i8, ptr %scevgep666, align 1              ; <i8> [#uses=0]
   br i1 false, label %bb368, label %bb366
 
 bb368:                                            ; preds = %bb366
@@ -177,8 +177,8 @@ bb369:                                            ; preds = %bb368, %bb356
   br i1 undef, label %bb373, label %bb388
 
 bb373:                                            ; preds = %bb383, %bb369
-  %7 = call  i8* @SyFgets(i8* undef, i32 %0) nounwind ; <i8*> [#uses=1]
-  %8 = icmp eq i8* %7, null                       ; <i1> [#uses=1]
+  %7 = call  ptr @SyFgets(ptr undef, i32 %0) nounwind ; <ptr> [#uses=1]
+  %8 = icmp eq ptr %7, null                       ; <i1> [#uses=1]
   br i1 %8, label %bb375, label %bb383
 
 bb375:                                            ; preds = %bb373
@@ -189,7 +189,7 @@ bb376:                                            ; preds = %bb375
   ret void
 
 bb383:                                            ; preds = %bb373
-  %10 = load i8, i8* undef, align 1                   ; <i8> [#uses=1]
+  %10 = load i8, ptr undef, align 1                   ; <i8> [#uses=1]
   %cond1 = icmp eq i8 %10, 46                     ; <i1> [#uses=1]
   br i1 %cond1, label %bb373, label %bb388
 
@@ -203,7 +203,7 @@ bb390:                                            ; preds = %isdigit1498.exit83,
 
 bb391:                                            ; preds = %bb390, %bb388
   %indvar724 = phi i32 [ %indvar.next725, %bb390 ], [ 0, %bb388 ] ; <i32> [#uses=2]
-  %11 = load i8, i8* undef, align 1                   ; <i8> [#uses=0]
+  %11 = load i8, ptr undef, align 1                   ; <i8> [#uses=0]
   br i1 false, label %bb395, label %bb392
 
 bb392:                                            ; preds = %bb391
@@ -217,7 +217,7 @@ bb394:                                            ; preds = %isdigit1498.exit87
 
 bb395:                                            ; preds = %bb394, %isdigit1498.exit83, %bb391
   %storemerge14.sum = add i32 %indvar724, undef   ; <i32> [#uses=1]
-  %p.26 = getelementptr [256 x i8], [256 x i8]* %line, i32 0, i32 %storemerge14.sum ; <i8*> [#uses=1]
+  %p.26 = getelementptr [256 x i8], ptr %line, i32 0, i32 %storemerge14.sum ; <ptr> [#uses=1]
   br i1 undef, label %bb400, label %isdigit1498.exit87
 
 isdigit1498.exit87:                               ; preds = %bb395
@@ -227,11 +227,11 @@ bb400:                                            ; preds = %isdigit1498.exit87,
   br i1 undef, label %bb402, label %bb403
 
 bb402:                                            ; preds = %bb400
-  %12 = getelementptr inbounds i8, i8* %p.26, i32 undef ; <i8*> [#uses=1]
+  %12 = getelementptr inbounds i8, ptr %p.26, i32 undef ; <ptr> [#uses=1]
   br label %bb403
 
 bb403:                                            ; preds = %bb402, %bb400
-  %p.29 = phi i8* [ %12, %bb402 ], [ undef, %bb400 ] ; <i8*> [#uses=0]
+  %p.29 = phi ptr [ %12, %bb402 ], [ undef, %bb400 ] ; <ptr> [#uses=0]
   br i1 undef, label %bb405, label %bb404
 
 bb404:                                            ; preds = %bb403
@@ -255,7 +255,7 @@ bb428:                                            ; preds = %bb407
   br label %bb440
 
 bb440:                                            ; preds = %bb428, %bb300
-  %13 = call  i8* @SyFgets(i8* undef, i32 %0) nounwind ; <i8*> [#uses=0]
+  %13 = call  ptr @SyFgets(ptr undef, i32 %0) nounwind ; <ptr> [#uses=0]
   br i1 false, label %bb442, label %bb308
 
 bb442:                                            ; preds = %bb440

diff  --git a/llvm/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll b/llvm/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
index b28f4542cf3d2..8613b5eb4f4e7 100644
--- a/llvm/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
+++ b/llvm/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
@@ -7,9 +7,9 @@
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
 	
 %0 = type { i32, i32 }
-%s1 = type { %s3, i32, %s4, i8*, void (i8*, i8*)*, i8*, i32*, i32*, i32*, i32, i64, [1 x i32] }
-%s2 = type { i32 (...)**, %s4 }
-%s3 = type { %s2, i32, i32, i32*, [4 x i8], float, %s4, i8*, i8* }
+%s1 = type { %s3, i32, %s4, ptr, ptr, ptr, ptr, ptr, ptr, i32, i64, [1 x i32] }
+%s2 = type { ptr, %s4 }
+%s3 = type { %s2, i32, i32, ptr, [4 x i8], float, %s4, ptr, ptr }
 %s4 = type { %s5 }
 %s5 = type { i32 }
 
@@ -20,44 +20,43 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-
 ; CHECK-DAG: lsls    {{r[0-9]+}}
 ; CHECK-NEXT: orr.w   {{r[0-9]+}}
 ; CHECK-NEXT: InlineAsm Start
-define void @test(%s1* %this, i32 %format, i32 %w, i32 %h, i32 %levels, i32* %s, i8* %data, i32* nocapture %rowbytes, void (i8*, i8*)* %release, i8* %info) nounwind {
+define void @test(ptr %this, i32 %format, i32 %w, i32 %h, i32 %levels, ptr %s, ptr %data, ptr nocapture %rowbytes, ptr %release, ptr %info) nounwind {
 entry:
-  %tmp1 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
-  store volatile i32 1, i32* %tmp1, align 4
-  %tmp12 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 1
-  store i32 %levels, i32* %tmp12, align 4
-  %tmp13 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 3
-  store i8* %data, i8** %tmp13, align 4
-  %tmp14 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 4
-  store void (i8*, i8*)* %release, void (i8*, i8*)** %tmp14, align 4
-  %tmp15 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 5
-  store i8* %info, i8** %tmp15, align 4
-  %tmp16 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 6
-  store i32* null, i32** %tmp16, align 4
-  %tmp17 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 7
-  store i32* null, i32** %tmp17, align 4
-  %tmp19 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 10
-  store i64 0, i64* %tmp19, align 4
-  %tmp20 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 0
-  tail call  void @f1(%s3* %tmp20, i32* %s) nounwind
+  %tmp1 = getelementptr inbounds %s1, ptr %this, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
+  store volatile i32 1, ptr %tmp1, align 4
+  %tmp12 = getelementptr inbounds %s1, ptr %this, i32 0, i32 1
+  store i32 %levels, ptr %tmp12, align 4
+  %tmp13 = getelementptr inbounds %s1, ptr %this, i32 0, i32 3
+  store ptr %data, ptr %tmp13, align 4
+  %tmp14 = getelementptr inbounds %s1, ptr %this, i32 0, i32 4
+  store ptr %release, ptr %tmp14, align 4
+  %tmp15 = getelementptr inbounds %s1, ptr %this, i32 0, i32 5
+  store ptr %info, ptr %tmp15, align 4
+  %tmp16 = getelementptr inbounds %s1, ptr %this, i32 0, i32 6
+  store ptr null, ptr %tmp16, align 4
+  %tmp17 = getelementptr inbounds %s1, ptr %this, i32 0, i32 7
+  store ptr null, ptr %tmp17, align 4
+  %tmp19 = getelementptr inbounds %s1, ptr %this, i32 0, i32 10
+  store i64 0, ptr %tmp19, align 4
+  tail call  void @f1(ptr %this, ptr %s) nounwind
   %tmp21 = shl i32 %format, 6
   %tmp22 = tail call  zeroext i8 @f2(i32 %format) nounwind
   %toBoolnot = icmp eq i8 %tmp22, 0
   %tmp23 = zext i1 %toBoolnot to i32
   %flags.0 = or i32 %tmp23, %tmp21
   %tmp24 = shl i32 %flags.0, 16
-  %asmtmp.i.i.i = tail call %0 asm sideeffect "\0A0:\09ldrex $1, [$2]\0A\09orr $1, $1, $3\0A\09strex $0, $1, [$2]\0A\09cmp $0, #0\0A\09bne 0b", "=&r,=&r,r,r,~{memory},~{cc}"(i32* %tmp1, i32 %tmp24) nounwind
-  %tmp25 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 2, i32 0, i32 0
-  store volatile i32 1, i32* %tmp25, align 4
+  %asmtmp.i.i.i = tail call %0 asm sideeffect "\0A0:\09ldrex $1, [$2]\0A\09orr $1, $1, $3\0A\09strex $0, $1, [$2]\0A\09cmp $0, #0\0A\09bne 0b", "=&r,=&r,r,r,~{memory},~{cc}"(ptr %tmp1, i32 %tmp24) nounwind
+  %tmp25 = getelementptr inbounds %s1, ptr %this, i32 0, i32 2, i32 0, i32 0
+  store volatile i32 1, ptr %tmp25, align 4
   %tmp26 = icmp eq i32 %levels, 0
   br i1 %tmp26, label %return, label %bb4
 
 bb4:
   %l.09 = phi i32 [ %tmp28, %bb4 ], [ 0, %entry ]
-  %scevgep = getelementptr %s1, %s1* %this, i32 0, i32 11, i32 %l.09
-  %scevgep10 = getelementptr i32, i32* %rowbytes, i32 %l.09
-  %tmp27 = load i32, i32* %scevgep10, align 4
-  store i32 %tmp27, i32* %scevgep, align 4
+  %scevgep = getelementptr %s1, ptr %this, i32 0, i32 11, i32 %l.09
+  %scevgep10 = getelementptr i32, ptr %rowbytes, i32 %l.09
+  %tmp27 = load i32, ptr %scevgep10, align 4
+  store i32 %tmp27, ptr %scevgep, align 4
   %tmp28 = add i32 %l.09, 1
   %exitcond = icmp eq i32 %tmp28, %levels
   br i1 %exitcond, label %return, label %bb4
@@ -66,5 +65,5 @@ return:
   ret void
 }
 
-declare void @f1(%s3*, i32*)
+declare void @f1(ptr, ptr)
 declare zeroext i8 @f2(i32)

diff  --git a/llvm/test/CodeGen/Thumb2/2010-04-26-CopyRegCrash.ll b/llvm/test/CodeGen/Thumb2/2010-04-26-CopyRegCrash.ll
index 3be016fbd1dd9..b7cb3a2447b79 100644
--- a/llvm/test/CodeGen/Thumb2/2010-04-26-CopyRegCrash.ll
+++ b/llvm/test/CodeGen/Thumb2/2010-04-26-CopyRegCrash.ll
@@ -53,7 +53,7 @@ bb107:                                            ; preds = %bb18
 bb110:                                            ; preds = %bb122, %bb107
   %asmtmp.i.i179 = tail call i16 asm "rev16 $0, $1\0A", "=l,l"(i16 undef) nounwind ; <i16> [#uses=1]
   %asmtmp.i.i178 = tail call i16 asm "rev16 $0, $1\0A", "=l,l"(i16 %asmtmp.i.i179) nounwind ; <i16> [#uses=1]
-  store i16 %asmtmp.i.i178, i16* undef, align 2
+  store i16 %asmtmp.i.i178, ptr undef, align 2
   br i1 undef, label %bb122, label %bb121
 
 bb121:                                            ; preds = %bb110

diff  --git a/llvm/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll b/llvm/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
index 15f789e49d41e..51b7366d65edd 100644
--- a/llvm/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
+++ b/llvm/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
@@ -10,9 +10,9 @@ target triple = "thumbv7-apple-darwin10"
 ;
 ; Only one can be coalesced.
 
-@.str = private constant [7 x i8] c"%g %g\0A\00", align 4 ; <[7 x i8]*> [#uses=1]
+@.str = private constant [7 x i8] c"%g %g\0A\00", align 4 ; <ptr> [#uses=1]
 
-define i32 @main(i32 %argc, i8** nocapture %Argv) nounwind {
+define i32 @main(i32 %argc, ptr nocapture %Argv) nounwind {
 entry:
   %0 = icmp eq i32 %argc, 2123                    ; <i1> [#uses=1]
   %U.0 = select i1 %0, double 3.282190e+01, double 8.731834e+02 ; <double> [#uses=2]
@@ -32,11 +32,11 @@ entry:
   %tmp7 = extractelement <2 x double> %5, i32 0   ; <double> [#uses=1]
   %tmp5 = extractelement <2 x double> %5, i32 1   ; <double> [#uses=1]
 ; CHECK: printf
-  %7 = tail call  i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), double %tmp7, double %tmp5) nounwind ; <i32> [#uses=0]
+  %7 = tail call  i32 (ptr, ...) @printf(ptr @.str, double %tmp7, double %tmp5) nounwind ; <i32> [#uses=0]
   %tmp3 = extractelement <2 x double> %6, i32 0   ; <double> [#uses=1]
   %tmp1 = extractelement <2 x double> %6, i32 1   ; <double> [#uses=1]
-  %8 = tail call  i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), double %tmp3, double %tmp1) nounwind ; <i32> [#uses=0]
+  %8 = tail call  i32 (ptr, ...) @printf(ptr @.str, double %tmp3, double %tmp1) nounwind ; <i32> [#uses=0]
   ret i32 0
 }
 
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind

diff  --git a/llvm/test/CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll b/llvm/test/CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll
index 42039161fbb00..b89640ced3311 100644
--- a/llvm/test/CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll
+++ b/llvm/test/CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll
@@ -3,7 +3,7 @@
 
 declare arm_apcscc i32 @__maskrune(i32, i32)
 
-define arm_apcscc i32 @strncmpic(i8* nocapture %s1, i8* nocapture %s2, i32 %n) nounwind {
+define arm_apcscc i32 @strncmpic(ptr nocapture %s1, ptr nocapture %s2, i32 %n) nounwind {
 entry:
   br i1 undef, label %bb11, label %bb19
 
@@ -18,7 +18,7 @@ bb1.i.i11:                                        ; preds = %bb11
   %1 = tail call arm_apcscc  i32 @__maskrune(i32 %0, i32 32768) nounwind ; <i32> [#uses=1]
   %2 = icmp ne i32 %1, 0                          ; <i1> [#uses=1]
   %3 = zext i1 %2 to i32                          ; <i32> [#uses=1]
-  %.pre = load i8, i8* undef, align 1                 ; <i8> [#uses=1]
+  %.pre = load i8, ptr undef, align 1                 ; <i8> [#uses=1]
   br label %isupper144.exit12
 
 isupper144.exit12:                                ; preds = %bb1.i.i11, %bb.i.i10

diff  --git a/llvm/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll b/llvm/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
index 34569e9116f2d..bc7a6a149a4b3 100644
--- a/llvm/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
+++ b/llvm/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
@@ -2,33 +2,33 @@
 ; rdar://8115404
 ; Tail merging must not split an IT block.
 
-%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
+%struct.FILE = type { ptr, i32, i32, i16, i16, %struct.__sbuf, i32, ptr, ptr, ptr, ptr, ptr, %struct.__sbuf, ptr, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
 %struct._RuneCharClass = type { [14 x i8], i32 }
-%struct._RuneEntry = type { i32, i32, i32, i32* }
-%struct._RuneLocale = type { [8 x i8], [32 x i8], i32 (i8*, i32, i8**)*, i32 (i32, i8*, i32, i8**)*, i32, [256 x i32], [256 x i32], [256 x i32], %struct._RuneRange, %struct._RuneRange, %struct._RuneRange, i8*, i32, i32, %struct._RuneCharClass* }
-%struct._RuneRange = type { i32, %struct._RuneEntry* }
+%struct._RuneEntry = type { i32, i32, i32, ptr }
+%struct._RuneLocale = type { [8 x i8], [32 x i8], ptr, ptr, i32, [256 x i32], [256 x i32], [256 x i32], %struct._RuneRange, %struct._RuneRange, %struct._RuneRange, ptr, i32, i32, ptr }
+%struct._RuneRange = type { i32, ptr }
 %struct.__sFILEX = type opaque
-%struct.__sbuf = type { i8*, i32 }
-
-@finput = external global %struct.FILE*           ; <%struct.FILE**> [#uses=1]
-@_DefaultRuneLocale = external global %struct._RuneLocale ; <%struct._RuneLocale*> [#uses=0]
-@token_buffer = external global [1025 x i8], align 4 ; <[1025 x i8]*> [#uses=1]
-@.str73 = external constant [6 x i8], align 4     ; <[6 x i8]*> [#uses=0]
-@.str174 = external constant [5 x i8], align 4    ; <[5 x i8]*> [#uses=0]
-@.str275 = external constant [6 x i8], align 4    ; <[6 x i8]*> [#uses=0]
-@.str376 = external constant [5 x i8], align 4    ; <[5 x i8]*> [#uses=0]
-@.str477 = external constant [6 x i8], align 4    ; <[6 x i8]*> [#uses=0]
-@.str578 = external constant [6 x i8], align 4    ; <[6 x i8]*> [#uses=0]
-@.str679 = external constant [7 x i8], align 4    ; <[7 x i8]*> [#uses=0]
-@.str780 = external constant [6 x i8], align 4    ; <[6 x i8]*> [#uses=0]
-@.str881 = external constant [5 x i8], align 4    ; <[5 x i8]*> [#uses=0]
-@.str982 = external constant [6 x i8], align 4    ; <[6 x i8]*> [#uses=0]
-@.str1083 = external constant [9 x i8], align 4   ; <[9 x i8]*> [#uses=0]
-@.str1184 = external constant [7 x i8], align 4   ; <[7 x i8]*> [#uses=0]
-@.str1285 = external constant [16 x i8], align 4  ; <[16 x i8]*> [#uses=0]
-@.str1386 = external constant [12 x i8], align 4  ; <[12 x i8]*> [#uses=0]
-@.str1487 = external constant [5 x i8], align 4   ; <[5 x i8]*> [#uses=0]
-@llvm.used = external global [1 x i8*]            ; <[1 x i8*]*> [#uses=0]
+%struct.__sbuf = type { ptr, i32 }
+
+@finput = external global ptr           ; <ptr> [#uses=1]
+@_DefaultRuneLocale = external global %struct._RuneLocale ; <ptr> [#uses=0]
+@token_buffer = external global [1025 x i8], align 4 ; <ptr> [#uses=1]
+@.str73 = external constant [6 x i8], align 4     ; <ptr> [#uses=0]
+@.str174 = external constant [5 x i8], align 4    ; <ptr> [#uses=0]
+@.str275 = external constant [6 x i8], align 4    ; <ptr> [#uses=0]
+@.str376 = external constant [5 x i8], align 4    ; <ptr> [#uses=0]
+@.str477 = external constant [6 x i8], align 4    ; <ptr> [#uses=0]
+@.str578 = external constant [6 x i8], align 4    ; <ptr> [#uses=0]
+@.str679 = external constant [7 x i8], align 4    ; <ptr> [#uses=0]
+@.str780 = external constant [6 x i8], align 4    ; <ptr> [#uses=0]
+@.str881 = external constant [5 x i8], align 4    ; <ptr> [#uses=0]
+@.str982 = external constant [6 x i8], align 4    ; <ptr> [#uses=0]
+@.str1083 = external constant [9 x i8], align 4   ; <ptr> [#uses=0]
+@.str1184 = external constant [7 x i8], align 4   ; <ptr> [#uses=0]
+@.str1285 = external constant [16 x i8], align 4  ; <ptr> [#uses=0]
+@.str1386 = external constant [12 x i8], align 4  ; <ptr> [#uses=0]
+@.str1487 = external constant [5 x i8], align 4   ; <ptr> [#uses=0]
+@llvm.used = external global [1 x ptr]            ; <ptr> [#uses=0]
 
 define fastcc i32 @parse_percent_token() nounwind {
 entry:
@@ -85,18 +85,18 @@ bb10:                                             ; preds = %bb9
   br label %bb11
 
 bb11:                                             ; preds = %bb10, %bb9
-  %p.0 = phi i8* [ undef, %bb10 ], [ %p.1, %bb9 ] ; <i8*> [#uses=1]
-  %0 = load %struct.FILE*, %struct.FILE** @finput, align 4       ; <%struct.FILE*> [#uses=1]
-  %1 = tail call i32 @getc(%struct.FILE* %0) nounwind ; <i32> [#uses=0]
+  %p.0 = phi ptr [ undef, %bb10 ], [ %p.1, %bb9 ] ; <ptr> [#uses=1]
+  %0 = load ptr, ptr @finput, align 4       ; <ptr> [#uses=1]
+  %1 = tail call i32 @getc(ptr %0) nounwind ; <i32> [#uses=0]
   br label %bb12
 
 bb12:                                             ; preds = %bb11, %bb.i.i
-  %p.1 = phi i8* [ %p.0, %bb11 ], [ getelementptr inbounds ([1025 x i8], [1025 x i8]* @token_buffer, i32 0, i32 0), %bb.i.i ] ; <i8*> [#uses=2]
+  %p.1 = phi ptr [ %p.0, %bb11 ], [ @token_buffer, %bb.i.i ] ; <ptr> [#uses=2]
   %2 = icmp ult i32 undef, 128                    ; <i1> [#uses=1]
   br i1 %2, label %bb.i.i2, label %bb1.i.i3
 
 bb.i.i2:                                          ; preds = %bb12
-  %3 = load i32, i32* null, align 4                    ; <i32> [#uses=1]
+  %3 = load i32, ptr null, align 4                    ; <i32> [#uses=1]
   %4 = lshr i32 %3, 8                             ; <i32> [#uses=1]
   %.lobit.i1 = and i32 %4, 1                      ; <i32> [#uses=1]
   %.not = icmp ne i32 %.lobit.i1, 0               ; <i1> [#uses=1]
@@ -107,7 +107,7 @@ bb1.i.i3:                                         ; preds = %bb12
   unreachable
 
 bb14:                                             ; preds = %bb.i.i2
-  store i8 0, i8* %p.1, align 1
+  store i8 0, ptr %p.1, align 1
   br i1 undef, label %bb43, label %bb15
 
 bb15:                                             ; preds = %bb14
@@ -118,10 +118,10 @@ bb43:                                             ; preds = %bb14, %bb.i.i, %ent
   ret i32 %.0
 }
 
-declare i32 @getc(%struct.FILE* nocapture) nounwind
+declare i32 @getc(ptr nocapture) nounwind
 
-declare i32 @strcmp(i8* nocapture, i8* nocapture) nounwind readonly
+declare i32 @strcmp(ptr nocapture, ptr nocapture) nounwind readonly
 
 declare i32 @__maskrune(i32, i32)
 
-declare i32 @ungetc(i32, %struct.FILE* nocapture) nounwind
+declare i32 @ungetc(i32, ptr nocapture) nounwind

diff  --git a/llvm/test/CodeGen/Thumb2/2010-08-10-VarSizedAllocaBug.ll b/llvm/test/CodeGen/Thumb2/2010-08-10-VarSizedAllocaBug.ll
index 3b14d22ddbff0..4350744168c31 100644
--- a/llvm/test/CodeGen/Thumb2/2010-08-10-VarSizedAllocaBug.ll
+++ b/llvm/test/CodeGen/Thumb2/2010-08-10-VarSizedAllocaBug.ll
@@ -1,6 +1,6 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -O3 | FileCheck %s
 
-@.str = private constant [4 x i8] c"%d\0A\00", align 4 ; <[4 x i8]*> [#uses=1]
+@.str = private constant [4 x i8] c"%d\0A\00", align 4 ; <ptr> [#uses=1]
 
 define internal fastcc i32 @Callee(i32 %i) nounwind {
 entry:
@@ -13,10 +13,10 @@ entry:
   br i1 %0, label %bb2, label %bb
 
 bb:                                               ; preds = %entry
-  %1 = alloca [1000 x i8], align 4                ; <[1000 x i8]*> [#uses=1]
-  %.sub = getelementptr inbounds [1000 x i8], [1000 x i8]* %1, i32 0, i32 0 ; <i8*> [#uses=2]
-  %2 = call i32 (i8*, i32, i32, i8*, ...) @__sprintf_chk(i8* %.sub, i32 0, i32 1000, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %i) nounwind ; <i32> [#uses=0]
-  %3 = load i8, i8* %.sub, align 4                    ; <i8> [#uses=1]
+  %1 = alloca [1000 x i8], align 4                ; <ptr> [#uses=1]
+  %.sub = getelementptr inbounds [1000 x i8], ptr %1, i32 0, i32 0 ; <ptr> [#uses=2]
+  %2 = call i32 (ptr, i32, i32, ptr, ...) @__sprintf_chk(ptr %.sub, i32 0, i32 1000, ptr @.str, i32 %i) nounwind ; <i32> [#uses=0]
+  %3 = load i8, ptr %.sub, align 4                    ; <i8> [#uses=1]
   %4 = sext i8 %3 to i32                          ; <i32> [#uses=1]
   ret i32 %4
 
@@ -30,7 +30,7 @@ bb2:                                              ; preds = %entry
   ret i32 0
 }
 
-declare i32 @__sprintf_chk(i8*, i32, i32, i8*, ...) nounwind
+declare i32 @__sprintf_chk(ptr, i32, i32, ptr, ...) nounwind
 
 define i32 @main() nounwind {
 ; CHECK-LABEL: main:
@@ -52,8 +52,8 @@ bb2:                                              ; preds = %bb
 ; CHECK-NOT: mov sp, r7
 ; CHECK-NOT: sub sp, #12
 ; CHECK: pop
-  %4 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %2) nounwind ; <i32> [#uses=0]
+  %4 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %2) nounwind ; <i32> [#uses=0]
   ret i32 0
 }
 
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind

diff  --git a/llvm/test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll b/llvm/test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll
index 9610cf7ab07a2..dcb384f7701b1 100644
--- a/llvm/test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll
+++ b/llvm/test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll
@@ -11,7 +11,7 @@ define void @foo() nounwind optsize "frame-pointer"="all" {
 ; CHECK: add r7, sp
 ; CHECK: sub sp, #4
 entry:
-  %m.i = alloca %struct.buf*, align 4
+  %m.i = alloca ptr, align 4
   br label %bb
 
 bb:

diff  --git a/llvm/test/CodeGen/Thumb2/2011-04-21-FILoweringBug.ll b/llvm/test/CodeGen/Thumb2/2011-04-21-FILoweringBug.ll
index 9878ae862c7a2..d008598ba5e3f 100644
--- a/llvm/test/CodeGen/Thumb2/2011-04-21-FILoweringBug.ll
+++ b/llvm/test/CodeGen/Thumb2/2011-04-21-FILoweringBug.ll
@@ -16,8 +16,8 @@ entry:
   %size = alloca i32, align 4
   %count = alloca i32, align 4
   %index = alloca i32, align 4
-  %0 = call i32 @foo(i32* %count, i32* %size, i32* %index) nounwind
+  %0 = call i32 @foo(ptr %count, ptr %size, ptr %index) nounwind
   ret i32 %0
 }
 
-declare i32 @foo(i32*, i32*, i32*)
+declare i32 @foo(ptr, ptr, ptr)

diff  --git a/llvm/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll b/llvm/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll
index 75bce22ebd246..9120630f1821b 100644
--- a/llvm/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll
+++ b/llvm/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=thumbv7-apple-darwin10 -arm-atomic-cfg-tidy=0 < %s | FileCheck %s
 
-%struct.op = type { %struct.op*, %struct.op*, %struct.op* ()*, i32, i16, i16, i8, i8 }
+%struct.op = type { ptr, ptr, ptr, i32, i16, i16, i8, i8 }
 
 ; CHECK: Perl_ck_sort
 ; CHECK: ldr
@@ -9,7 +9,7 @@
 
 define void @Perl_ck_sort() nounwind optsize {
 entry:
-  %tmp27 = load %struct.op*, %struct.op** undef, align 4
+  %tmp27 = load ptr, ptr undef, align 4
   switch i16 undef, label %if.end151 [
     i16 178, label %if.then60
     i16 177, label %if.then60
@@ -19,14 +19,13 @@ if.then60:                                        ; preds = %if.then40
   br i1 undef, label %if.then67, label %if.end95
 
 if.then67:                                        ; preds = %if.then60
-  %op_next71 = getelementptr inbounds %struct.op, %struct.op* %tmp27, i32 0, i32 0
-  store %struct.op* %tmp27, %struct.op** %op_next71, align 4
-  %0 = getelementptr inbounds %struct.op, %struct.op* %tmp27, i32 1, i32 0
+  store ptr %tmp27, ptr %tmp27, align 4
+  %0 = getelementptr inbounds %struct.op, ptr %tmp27, i32 1, i32 0
   br label %if.end95
 
 if.end95:                                         ; preds = %if.else92, %if.then67
-  %.pre-phi = phi %struct.op** [ undef, %if.then60 ], [ %0, %if.then67 ]
-  %tmp98 = load %struct.op*, %struct.op** %.pre-phi, align 4
+  %.pre-phi = phi ptr [ undef, %if.then60 ], [ %0, %if.then67 ]
+  %tmp98 = load ptr, ptr %.pre-phi, align 4
   br label %if.end151
 
 if.end151:                                        ; preds = %if.end100, %if.end, %entry

diff  --git a/llvm/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll b/llvm/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll
index 854b42522e7c7..967de3aca5321 100644
--- a/llvm/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll
+++ b/llvm/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll
@@ -1,40 +1,39 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-ios -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8
 ; RUN: llc < %s -mtriple=thumbv8-none-linux-gnueabi
 
-%struct.LIST_NODE.0.16 = type { %struct.LIST_NODE.0.16*, i8* }
+%struct.LIST_NODE.0.16 = type { ptr, ptr }
 
-define %struct.LIST_NODE.0.16* @list_AssocListPair(%struct.LIST_NODE.0.16* %List, i8* %Key) nounwind readonly {
+define ptr @list_AssocListPair(ptr %List, ptr %Key) nounwind readonly {
 entry:
   br label %bb3
 
 bb:                                               ; preds = %bb3
-  %Scan.0.idx7.val = load i8*, i8** undef, align 4
-  %.idx = getelementptr i8, i8* %Scan.0.idx7.val, i32 4
-  %0 = bitcast i8* %.idx to i8**
-  %.idx.val = load i8*, i8** %0, align 4
-  %1 = icmp eq i8* %.idx.val, %Key
-  br i1 %1, label %bb5, label %bb2
+  %Scan.0.idx7.val = load ptr, ptr undef, align 4
+  %.idx = getelementptr i8, ptr %Scan.0.idx7.val, i32 4
+  %.idx.val = load ptr, ptr %.idx, align 4
+  %0 = icmp eq ptr %.idx.val, %Key
+  br i1 %0, label %bb5, label %bb2
 
 bb2:                                              ; preds = %bb
-  %Scan.0.idx8.val = load %struct.LIST_NODE.0.16*, %struct.LIST_NODE.0.16** undef, align 4
+  %Scan.0.idx8.val = load ptr, ptr undef, align 4
   br label %bb3
 
 bb3:                                              ; preds = %bb2, %entry
-  %Scan.0 = phi %struct.LIST_NODE.0.16* [ %List, %entry ], [ %Scan.0.idx8.val, %bb2 ]
-  %2 = icmp eq %struct.LIST_NODE.0.16* %Scan.0, null
-  br i1 %2, label %bb5, label %bb
+  %Scan.0 = phi ptr [ %List, %entry ], [ %Scan.0.idx8.val, %bb2 ]
+  %1 = icmp eq ptr %Scan.0, null
+  br i1 %1, label %bb5, label %bb
 
 bb5:                                              ; preds = %bb3, %bb
-  ret %struct.LIST_NODE.0.16* null
+  ret ptr null
 }
 
 declare void @use(i32)
-define double @find_max_double(i32 %n, double* nocapture readonly %aa) {
+define double @find_max_double(i32 %n, ptr nocapture readonly %aa) {
 entry:
   br i1 undef, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.body, %entry
-  %0 = load double, double* null, align 8
+  %0 = load double, ptr null, align 8
   %cmp2.6 = fcmp ogt double %0, 0.000000e+00
   %idx.1.6 = select i1 %cmp2.6, i32 undef, i32 0
   %idx.1.7 = select i1 undef, i32 undef, i32 %idx.1.6

diff  --git a/llvm/test/CodeGen/Thumb2/2013-02-19-tail-call-register-hint.ll b/llvm/test/CodeGen/Thumb2/2013-02-19-tail-call-register-hint.ll
index c9d3f3dd2847e..31223377213ff 100644
--- a/llvm/test/CodeGen/Thumb2/2013-02-19-tail-call-register-hint.ll
+++ b/llvm/test/CodeGen/Thumb2/2013-02-19-tail-call-register-hint.ll
@@ -11,39 +11,35 @@
 %"myclass" = type { %struct.foo }
 %struct.foo = type { i32, [40 x i8] }
 
-define hidden void @func(i8* %Data) nounwind ssp {
-  %1 = getelementptr inbounds i8, i8* %Data, i32 12
-  %2 = bitcast i8* %1 to %"myclass"*
-  tail call void @abc(%"myclass"* %2) nounwind
-  tail call void @def(%"myclass"* %2) nounwind
-  %3 = getelementptr inbounds i8, i8* %Data, i32 8
-  %4 = bitcast i8* %3 to i8**
-  %5 = load i8*, i8** %4, align 4
-  tail call void @ghi(i8* %5) nounwind
-  %6 = bitcast i8* %Data to void (i8*)**
-  %7 = load void (i8*)*, void (i8*)** %6, align 4
-  %8 = getelementptr inbounds i8, i8* %Data, i32 4
-  %9 = bitcast i8* %8 to i8**
-  %10 = load i8*, i8** %9, align 4
-  %11 = icmp eq i8* %Data, null
-  br i1 %11, label %14, label %12
+define hidden void @func(ptr %Data) nounwind ssp {
+  %1 = getelementptr inbounds i8, ptr %Data, i32 12
+  tail call void @abc(ptr %1) nounwind
+  tail call void @def(ptr %1) nounwind
+  %2 = getelementptr inbounds i8, ptr %Data, i32 8
+  %3 = load ptr, ptr %2, align 4
+  tail call void @ghi(ptr %3) nounwind
+  %4 = load ptr, ptr %Data, align 4
+  %5 = getelementptr inbounds i8, ptr %Data, i32 4
+  %6 = load ptr, ptr %5, align 4
+  %7 = icmp eq ptr %Data, null
+  br i1 %7, label %10, label %8
 
 ; <label>:12                                      ; preds = %0
-  %13 = tail call %"myclass"* @jkl(%"myclass"* %2) nounwind
-  tail call void @mno(i8* %Data) nounwind
-  br label %14
+  %9 = tail call ptr @jkl(ptr %1) nounwind
+  tail call void @mno(ptr %Data) nounwind
+  br label %10
 
-; <label>:14                                      ; preds = %12, %0
-  tail call void %7(i8* %10) nounwind
+; <label>:14                                      ; preds = %8, %0
+  tail call void %4(ptr %6) nounwind
   ret void
 }
 
-declare void @mno(i8*)
+declare void @mno(ptr)
 
-declare void @def(%"myclass"*)
+declare void @def(ptr)
 
-declare void @abc(%"myclass"*)
+declare void @abc(ptr)
 
-declare void @ghi(i8*)
+declare void @ghi(ptr)
 
-declare %"myclass"* @jkl(%"myclass"*) nounwind
+declare ptr @jkl(ptr) nounwind

diff  --git a/llvm/test/CodeGen/Thumb2/2013-03-02-vduplane-nonconstant-source-index.ll b/llvm/test/CodeGen/Thumb2/2013-03-02-vduplane-nonconstant-source-index.ll
index 5936b78030028..779b5c09663f9 100644
--- a/llvm/test/CodeGen/Thumb2/2013-03-02-vduplane-nonconstant-source-index.ll
+++ b/llvm/test/CodeGen/Thumb2/2013-03-02-vduplane-nonconstant-source-index.ll
@@ -1,6 +1,6 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
 
-define void @bar(<4 x i32>* %p, i32 %lane, <4 x i32> %phitmp) nounwind {
+define void @bar(ptr %p, i32 %lane, <4 x i32> %phitmp) nounwind {
 ; CHECK:  lsls r[[ADDR:[0-9]+]], r[[ADDR]], #2
 ; CHECK:  vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r[[SOURCE:[0-9]+]]:128], r[[ADDR]]
 ; CHECK:  vld1.32 {[[DREG:d[0-9]+]][], [[DREG2:d[0-9]+]][]}, [r[[SOURCE]]:32]
@@ -9,6 +9,6 @@ define void @bar(<4 x i32>* %p, i32 %lane, <4 x i32> %phitmp) nounwind {
   %r1 = insertelement <4 x i32> undef, i32 %val, i32 1
   %r2 = insertelement <4 x i32> %r1, i32 %val, i32 2
   %r3 = insertelement <4 x i32> %r2, i32 %val, i32 3
-  store <4 x i32> %r3, <4 x i32>* %p, align 4
+  store <4 x i32> %r3, ptr %p, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/branch-targets.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/branch-targets.ll
index 07cf09c9dfeaf..165e73c2e8827 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/branch-targets.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/branch-targets.ll
@@ -9,36 +9,36 @@
 ; CHECK-MID: bb.1.for.header:
 ; CHECK-MID:   renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1
 ; CHECK-MID: bb.2.for.cond.cleanup:
-define void @check_loop_dec_brcond_combine(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+define void @check_loop_dec_brcond_combine(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 %N)
   br label %for.body.preheader
 
 for.body.preheader:
-  %scevgep = getelementptr i32, i32* %a, i32 -1
-  %scevgep4 = getelementptr i32, i32* %c, i32 -1
-  %scevgep8 = getelementptr i32, i32* %b, i32 -1
+  %scevgep = getelementptr i32, ptr %a, i32 -1
+  %scevgep4 = getelementptr i32, ptr %c, i32 -1
+  %scevgep8 = getelementptr i32, ptr %b, i32 -1
   br label %for.header
 
 for.body:
-  %scevgep11 = getelementptr i32, i32* %lsr.iv9, i32 1
-  %ld1 = load i32, i32* %scevgep11, align 4
-  %scevgep7 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %ld2 = load i32, i32* %scevgep7, align 4
+  %scevgep11 = getelementptr i32, ptr %lsr.iv9, i32 1
+  %ld1 = load i32, ptr %scevgep11, align 4
+  %scevgep7 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %ld2 = load i32, ptr %scevgep7, align 4
   %mul = mul nsw i32 %ld2, %ld1
-  %scevgep3 = getelementptr i32, i32* %lsr.iv1, i32 1
-  store i32 %mul, i32* %scevgep3, align 4
-  %scevgep2 = getelementptr i32, i32* %lsr.iv1, i32 1
-  %scevgep6 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 1
+  %scevgep3 = getelementptr i32, ptr %lsr.iv1, i32 1
+  store i32 %mul, ptr %scevgep3, align 4
+  %scevgep2 = getelementptr i32, ptr %lsr.iv1, i32 1
+  %scevgep6 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 1
   %count.next = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 1)
   %cmp = icmp ne i32 %count.next, 0
   br i1 %cmp, label %for.header, label %for.cond.cleanup
 
 for.header:
-  %lsr.iv9 = phi i32* [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
-  %lsr.iv5 = phi i32* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-  %lsr.iv1 = phi i32* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+  %lsr.iv9 = phi ptr [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
+  %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+  %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
   %count = phi i32 [ %start, %for.body.preheader ], [ %count.next, %for.body ]
   br label %for.body
 
@@ -53,36 +53,36 @@ for.cond.cleanup:
 ; CHECK-MID:   renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1
 ; CHECK-MID:   tB %bb.2, 14
 ; CHECK-MID: bb.2.for.cond.cleanup:
-define void @check_loop_dec_ugt_brcond_combine(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+define void @check_loop_dec_ugt_brcond_combine(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 %N)
   br label %for.body.preheader
 
 for.body.preheader:
-  %scevgep = getelementptr i32, i32* %a, i32 -1
-  %scevgep4 = getelementptr i32, i32* %c, i32 -1
-  %scevgep8 = getelementptr i32, i32* %b, i32 -1
+  %scevgep = getelementptr i32, ptr %a, i32 -1
+  %scevgep4 = getelementptr i32, ptr %c, i32 -1
+  %scevgep8 = getelementptr i32, ptr %b, i32 -1
   br label %for.header
 
 for.body:
-  %scevgep11 = getelementptr i32, i32* %lsr.iv9, i32 1
-  %ld1 = load i32, i32* %scevgep11, align 4
-  %scevgep7 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %ld2 = load i32, i32* %scevgep7, align 4
+  %scevgep11 = getelementptr i32, ptr %lsr.iv9, i32 1
+  %ld1 = load i32, ptr %scevgep11, align 4
+  %scevgep7 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %ld2 = load i32, ptr %scevgep7, align 4
   %mul = mul nsw i32 %ld2, %ld1
-  %scevgep3 = getelementptr i32, i32* %lsr.iv1, i32 1
-  store i32 %mul, i32* %scevgep3, align 4
-  %scevgep2 = getelementptr i32, i32* %lsr.iv1, i32 1
-  %scevgep6 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 1
+  %scevgep3 = getelementptr i32, ptr %lsr.iv1, i32 1
+  store i32 %mul, ptr %scevgep3, align 4
+  %scevgep2 = getelementptr i32, ptr %lsr.iv1, i32 1
+  %scevgep6 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 1
   %count.next = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 1)
   %cmp = icmp ugt i32 %count.next, 0
   br i1 %cmp, label %for.header, label %for.cond.cleanup
 
 for.header:
-  %lsr.iv9 = phi i32* [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
-  %lsr.iv5 = phi i32* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-  %lsr.iv1 = phi i32* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+  %lsr.iv9 = phi ptr [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
+  %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+  %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
   %count = phi i32 [ %start, %for.body.preheader ], [ %count.next, %for.body ]
   br label %for.body
 
@@ -97,36 +97,36 @@ for.cond.cleanup:
 ; CHECK-MID:   renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1
 ; CHECK-MID:   tB %bb.2, 14
 ; CHECK-MID: bb.2.for.cond.cleanup:
-define void @check_loop_dec_ult_brcond_combine(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+define void @check_loop_dec_ult_brcond_combine(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 %N)
   br label %for.body.preheader
 
 for.body.preheader:
-  %scevgep = getelementptr i32, i32* %a, i32 -1
-  %scevgep4 = getelementptr i32, i32* %c, i32 -1
-  %scevgep8 = getelementptr i32, i32* %b, i32 -1
+  %scevgep = getelementptr i32, ptr %a, i32 -1
+  %scevgep4 = getelementptr i32, ptr %c, i32 -1
+  %scevgep8 = getelementptr i32, ptr %b, i32 -1
   br label %for.header
 
 for.body:
-  %scevgep11 = getelementptr i32, i32* %lsr.iv9, i32 1
-  %ld1 = load i32, i32* %scevgep11, align 4
-  %scevgep7 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %ld2 = load i32, i32* %scevgep7, align 4
+  %scevgep11 = getelementptr i32, ptr %lsr.iv9, i32 1
+  %ld1 = load i32, ptr %scevgep11, align 4
+  %scevgep7 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %ld2 = load i32, ptr %scevgep7, align 4
   %mul = mul nsw i32 %ld2, %ld1
-  %scevgep3 = getelementptr i32, i32* %lsr.iv1, i32 1
-  store i32 %mul, i32* %scevgep3, align 4
-  %scevgep2 = getelementptr i32, i32* %lsr.iv1, i32 1
-  %scevgep6 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 1
+  %scevgep3 = getelementptr i32, ptr %lsr.iv1, i32 1
+  store i32 %mul, ptr %scevgep3, align 4
+  %scevgep2 = getelementptr i32, ptr %lsr.iv1, i32 1
+  %scevgep6 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 1
   %count.next = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 1)
   %cmp = icmp ult i32 %count.next, 1
   br i1 %cmp, label %for.cond.cleanup, label %for.header
 
 for.header:
-  %lsr.iv9 = phi i32* [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
-  %lsr.iv5 = phi i32* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-  %lsr.iv1 = phi i32* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+  %lsr.iv9 = phi ptr [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
+  %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+  %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
   %count = phi i32 [ %start, %for.body.preheader ], [ %count.next, %for.body ]
   br label %for.body
 
@@ -141,37 +141,37 @@ for.cond.cleanup:
 ; CHECK-MID:   renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1
 ; CHECK-MID:   tB %bb.2, 14
 ; CHECK-MID: bb.2.for.cond.cleanup:
-define void @check_loop_dec_ult_xor_brcond_combine(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+define void @check_loop_dec_ult_xor_brcond_combine(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 %N)
   br label %for.body.preheader
 
 for.body.preheader:
-  %scevgep = getelementptr i32, i32* %a, i32 -1
-  %scevgep4 = getelementptr i32, i32* %c, i32 -1
-  %scevgep8 = getelementptr i32, i32* %b, i32 -1
+  %scevgep = getelementptr i32, ptr %a, i32 -1
+  %scevgep4 = getelementptr i32, ptr %c, i32 -1
+  %scevgep8 = getelementptr i32, ptr %b, i32 -1
   br label %for.header
 
 for.body:
-  %scevgep11 = getelementptr i32, i32* %lsr.iv9, i32 1
-  %ld1 = load i32, i32* %scevgep11, align 4
-  %scevgep7 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %ld2 = load i32, i32* %scevgep7, align 4
+  %scevgep11 = getelementptr i32, ptr %lsr.iv9, i32 1
+  %ld1 = load i32, ptr %scevgep11, align 4
+  %scevgep7 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %ld2 = load i32, ptr %scevgep7, align 4
   %mul = mul nsw i32 %ld2, %ld1
-  %scevgep3 = getelementptr i32, i32* %lsr.iv1, i32 1
-  store i32 %mul, i32* %scevgep3, align 4
-  %scevgep2 = getelementptr i32, i32* %lsr.iv1, i32 1
-  %scevgep6 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 1
+  %scevgep3 = getelementptr i32, ptr %lsr.iv1, i32 1
+  store i32 %mul, ptr %scevgep3, align 4
+  %scevgep2 = getelementptr i32, ptr %lsr.iv1, i32 1
+  %scevgep6 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 1
   %count.next = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 1)
   %cmp = icmp ult i32 %count.next, 1
   %negate = xor i1 %cmp, 1
   br i1 %negate, label %for.header, label %for.cond.cleanup
 
 for.header:
-  %lsr.iv9 = phi i32* [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
-  %lsr.iv5 = phi i32* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-  %lsr.iv1 = phi i32* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+  %lsr.iv9 = phi ptr [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
+  %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+  %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
   %count = phi i32 [ %start, %for.body.preheader ], [ %count.next, %for.body ]
   br label %for.body
 
@@ -186,36 +186,36 @@ for.cond.cleanup:
 ; CHECK-MID:   renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1
 ; CHECK-MID:   tB %bb.2, 14
 ; CHECK-MID: bb.2.for.cond.cleanup:
-define void @check_loop_dec_sgt_brcond_combine(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+define void @check_loop_dec_sgt_brcond_combine(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 %N)
   br label %for.body.preheader
 
 for.body.preheader:
-  %scevgep = getelementptr i32, i32* %a, i32 -1
-  %scevgep4 = getelementptr i32, i32* %c, i32 -1
-  %scevgep8 = getelementptr i32, i32* %b, i32 -1
+  %scevgep = getelementptr i32, ptr %a, i32 -1
+  %scevgep4 = getelementptr i32, ptr %c, i32 -1
+  %scevgep8 = getelementptr i32, ptr %b, i32 -1
   br label %for.header
 
 for.body:
-  %scevgep11 = getelementptr i32, i32* %lsr.iv9, i32 1
-  %ld1 = load i32, i32* %scevgep11, align 4
-  %scevgep7 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %ld2 = load i32, i32* %scevgep7, align 4
+  %scevgep11 = getelementptr i32, ptr %lsr.iv9, i32 1
+  %ld1 = load i32, ptr %scevgep11, align 4
+  %scevgep7 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %ld2 = load i32, ptr %scevgep7, align 4
   %mul = mul nsw i32 %ld2, %ld1
-  %scevgep3 = getelementptr i32, i32* %lsr.iv1, i32 1
-  store i32 %mul, i32* %scevgep3, align 4
-  %scevgep2 = getelementptr i32, i32* %lsr.iv1, i32 1
-  %scevgep6 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 1
+  %scevgep3 = getelementptr i32, ptr %lsr.iv1, i32 1
+  store i32 %mul, ptr %scevgep3, align 4
+  %scevgep2 = getelementptr i32, ptr %lsr.iv1, i32 1
+  %scevgep6 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 1
   %count.next = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 1)
   %cmp = icmp sgt i32 %count.next, 0
   br i1 %cmp, label %for.header, label %for.cond.cleanup
 
 for.header:
-  %lsr.iv9 = phi i32* [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
-  %lsr.iv5 = phi i32* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-  %lsr.iv1 = phi i32* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+  %lsr.iv9 = phi ptr [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
+  %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+  %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
   %count = phi i32 [ %start, %for.body.preheader ], [ %count.next, %for.body ]
   br label %for.body
 
@@ -230,36 +230,36 @@ for.cond.cleanup:
 ; CHECK-MID:   renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1
 ; CHECK-MID:   tB %bb.2, 14
 ; CHECK-MID: bb.2.for.cond.cleanup:
-define void @check_loop_dec_sge_brcond_combine(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+define void @check_loop_dec_sge_brcond_combine(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 %N)
   br label %for.body.preheader
 
 for.body.preheader:
-  %scevgep = getelementptr i32, i32* %a, i32 -1
-  %scevgep4 = getelementptr i32, i32* %c, i32 -1
-  %scevgep8 = getelementptr i32, i32* %b, i32 -1
+  %scevgep = getelementptr i32, ptr %a, i32 -1
+  %scevgep4 = getelementptr i32, ptr %c, i32 -1
+  %scevgep8 = getelementptr i32, ptr %b, i32 -1
   br label %for.header
 
 for.body:
-  %scevgep11 = getelementptr i32, i32* %lsr.iv9, i32 1
-  %ld1 = load i32, i32* %scevgep11, align 4
-  %scevgep7 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %ld2 = load i32, i32* %scevgep7, align 4
+  %scevgep11 = getelementptr i32, ptr %lsr.iv9, i32 1
+  %ld1 = load i32, ptr %scevgep11, align 4
+  %scevgep7 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %ld2 = load i32, ptr %scevgep7, align 4
   %mul = mul nsw i32 %ld2, %ld1
-  %scevgep3 = getelementptr i32, i32* %lsr.iv1, i32 1
-  store i32 %mul, i32* %scevgep3, align 4
-  %scevgep2 = getelementptr i32, i32* %lsr.iv1, i32 1
-  %scevgep6 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 1
+  %scevgep3 = getelementptr i32, ptr %lsr.iv1, i32 1
+  store i32 %mul, ptr %scevgep3, align 4
+  %scevgep2 = getelementptr i32, ptr %lsr.iv1, i32 1
+  %scevgep6 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 1
   %count.next = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 1)
   %cmp = icmp sge i32 %count.next, 1
   br i1 %cmp, label %for.header, label %for.cond.cleanup
 
 for.header:
-  %lsr.iv9 = phi i32* [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
-  %lsr.iv5 = phi i32* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-  %lsr.iv1 = phi i32* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+  %lsr.iv9 = phi ptr [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
+  %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+  %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
   %count = phi i32 [ %start, %for.body.preheader ], [ %count.next, %for.body ]
   br label %for.body
 
@@ -274,37 +274,37 @@ for.cond.cleanup:
 ; CHECK-MID:   renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1
 ; CHECK-MID:   tB %bb.2, 14
 ; CHECK-MID: bb.2.for.cond.cleanup:
-define void @check_loop_dec_sge_xor_brcond_combine(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+define void @check_loop_dec_sge_xor_brcond_combine(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 %N)
   br label %for.body.preheader
 
 for.body.preheader:
-  %scevgep = getelementptr i32, i32* %a, i32 -1
-  %scevgep4 = getelementptr i32, i32* %c, i32 -1
-  %scevgep8 = getelementptr i32, i32* %b, i32 -1
+  %scevgep = getelementptr i32, ptr %a, i32 -1
+  %scevgep4 = getelementptr i32, ptr %c, i32 -1
+  %scevgep8 = getelementptr i32, ptr %b, i32 -1
   br label %for.header
 
 for.body:
-  %scevgep11 = getelementptr i32, i32* %lsr.iv9, i32 1
-  %ld1 = load i32, i32* %scevgep11, align 4
-  %scevgep7 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %ld2 = load i32, i32* %scevgep7, align 4
+  %scevgep11 = getelementptr i32, ptr %lsr.iv9, i32 1
+  %ld1 = load i32, ptr %scevgep11, align 4
+  %scevgep7 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %ld2 = load i32, ptr %scevgep7, align 4
   %mul = mul nsw i32 %ld2, %ld1
-  %scevgep3 = getelementptr i32, i32* %lsr.iv1, i32 1
-  store i32 %mul, i32* %scevgep3, align 4
-  %scevgep2 = getelementptr i32, i32* %lsr.iv1, i32 1
-  %scevgep6 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 1
+  %scevgep3 = getelementptr i32, ptr %lsr.iv1, i32 1
+  store i32 %mul, ptr %scevgep3, align 4
+  %scevgep2 = getelementptr i32, ptr %lsr.iv1, i32 1
+  %scevgep6 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 1
   %count.next = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 1)
   %cmp = icmp sge i32 %count.next, 1
   %negated = xor i1 %cmp, 1
   br i1 %negated, label %for.cond.cleanup, label %for.header
 
 for.header:
-  %lsr.iv9 = phi i32* [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
-  %lsr.iv5 = phi i32* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-  %lsr.iv1 = phi i32* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+  %lsr.iv9 = phi ptr [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
+  %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+  %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
   %count = phi i32 [ %start, %for.body.preheader ], [ %count.next, %for.body ]
   br label %for.body
 
@@ -319,36 +319,36 @@ for.cond.cleanup:
 ; CHECK-MID:   renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1
 ; CHECK-MID:   tB %bb.2, 14
 ; CHECK-MID: bb.2.for.cond.cleanup:
-define void @check_loop_dec_uge_brcond_combine(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+define void @check_loop_dec_uge_brcond_combine(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 %N)
   br label %for.body.preheader
 
 for.body.preheader:
-  %scevgep = getelementptr i32, i32* %a, i32 -1
-  %scevgep4 = getelementptr i32, i32* %c, i32 -1
-  %scevgep8 = getelementptr i32, i32* %b, i32 -1
+  %scevgep = getelementptr i32, ptr %a, i32 -1
+  %scevgep4 = getelementptr i32, ptr %c, i32 -1
+  %scevgep8 = getelementptr i32, ptr %b, i32 -1
   br label %for.header
 
 for.body:
-  %scevgep11 = getelementptr i32, i32* %lsr.iv9, i32 1
-  %ld1 = load i32, i32* %scevgep11, align 4
-  %scevgep7 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %ld2 = load i32, i32* %scevgep7, align 4
+  %scevgep11 = getelementptr i32, ptr %lsr.iv9, i32 1
+  %ld1 = load i32, ptr %scevgep11, align 4
+  %scevgep7 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %ld2 = load i32, ptr %scevgep7, align 4
   %mul = mul nsw i32 %ld2, %ld1
-  %scevgep3 = getelementptr i32, i32* %lsr.iv1, i32 1
-  store i32 %mul, i32* %scevgep3, align 4
-  %scevgep2 = getelementptr i32, i32* %lsr.iv1, i32 1
-  %scevgep6 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 1
+  %scevgep3 = getelementptr i32, ptr %lsr.iv1, i32 1
+  store i32 %mul, ptr %scevgep3, align 4
+  %scevgep2 = getelementptr i32, ptr %lsr.iv1, i32 1
+  %scevgep6 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 1
   %count.next = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 1)
   %cmp = icmp uge i32 %count.next, 1
   br i1 %cmp, label %for.header, label %for.cond.cleanup
 
 for.header:
-  %lsr.iv9 = phi i32* [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
-  %lsr.iv5 = phi i32* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-  %lsr.iv1 = phi i32* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+  %lsr.iv9 = phi ptr [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
+  %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+  %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
   %count = phi i32 [ %start, %for.body.preheader ], [ %count.next, %for.body ]
   br label %for.body
 
@@ -363,37 +363,37 @@ for.cond.cleanup:
 ; CHECK-MID:   renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1
 ; CHECK-MID:   tB %bb.2, 14
 ; CHECK-MID: bb.2.for.cond.cleanup:
-define void @check_loop_dec_uge_xor_brcond_combine(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+define void @check_loop_dec_uge_xor_brcond_combine(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 %N)
   br label %for.body.preheader
 
 for.body.preheader:
-  %scevgep = getelementptr i32, i32* %a, i32 -1
-  %scevgep4 = getelementptr i32, i32* %c, i32 -1
-  %scevgep8 = getelementptr i32, i32* %b, i32 -1
+  %scevgep = getelementptr i32, ptr %a, i32 -1
+  %scevgep4 = getelementptr i32, ptr %c, i32 -1
+  %scevgep8 = getelementptr i32, ptr %b, i32 -1
   br label %for.header
 
 for.body:
-  %scevgep11 = getelementptr i32, i32* %lsr.iv9, i32 1
-  %ld1 = load i32, i32* %scevgep11, align 4
-  %scevgep7 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %ld2 = load i32, i32* %scevgep7, align 4
+  %scevgep11 = getelementptr i32, ptr %lsr.iv9, i32 1
+  %ld1 = load i32, ptr %scevgep11, align 4
+  %scevgep7 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %ld2 = load i32, ptr %scevgep7, align 4
   %mul = mul nsw i32 %ld2, %ld1
-  %scevgep3 = getelementptr i32, i32* %lsr.iv1, i32 1
-  store i32 %mul, i32* %scevgep3, align 4
-  %scevgep2 = getelementptr i32, i32* %lsr.iv1, i32 1
-  %scevgep6 = getelementptr i32, i32* %lsr.iv5, i32 1
-  %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 1
+  %scevgep3 = getelementptr i32, ptr %lsr.iv1, i32 1
+  store i32 %mul, ptr %scevgep3, align 4
+  %scevgep2 = getelementptr i32, ptr %lsr.iv1, i32 1
+  %scevgep6 = getelementptr i32, ptr %lsr.iv5, i32 1
+  %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 1
   %count.next = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 1)
   %cmp = icmp uge i32 %count.next, 1
   %negated = xor i1 %cmp, 1
   br i1 %negated, label %for.cond.cleanup, label %for.header
 
 for.header:
-  %lsr.iv9 = phi i32* [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
-  %lsr.iv5 = phi i32* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-  %lsr.iv1 = phi i32* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+  %lsr.iv9 = phi ptr [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
+  %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+  %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
   %count = phi i32 [ %start, %for.body.preheader ], [ %count.next, %for.body ]
   br label %for.body
 
@@ -408,7 +408,7 @@ for.cond.cleanup:
 ; CHECK-MID:   renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1
 ; CHECK-MID:   tB %bb.2
 ; CHECK-MID: bb.2.while.end:
-define void @check_negated_xor_wls(i16* nocapture %a, i16* nocapture readonly %b, i32 %N) {
+define void @check_negated_xor_wls(ptr nocapture %a, ptr nocapture readonly %b, i32 %N) {
 entry:
   %wls = call {i32, i1} @llvm.test.start.loop.iterations.i32(i32 %N)
   %wls0 = extractvalue {i32, i1} %wls, 0
@@ -420,13 +420,13 @@ while.body.preheader:
   br label %while.body
 
 while.body:
-  %a.addr.06 = phi i16* [ %incdec.ptr1, %while.body ], [ %a, %while.body.preheader ]
-  %b.addr.05 = phi i16* [ %incdec.ptr, %while.body ], [ %b, %while.body.preheader ]
+  %a.addr.06 = phi ptr [ %incdec.ptr1, %while.body ], [ %a, %while.body.preheader ]
+  %b.addr.05 = phi ptr [ %incdec.ptr, %while.body ], [ %b, %while.body.preheader ]
   %count = phi i32 [ %wls0, %while.body.preheader ], [ %count.next, %while.body ]
-  %incdec.ptr = getelementptr inbounds i16, i16* %b.addr.05, i32 1
-  %ld.b = load i16, i16* %b.addr.05, align 2
-  %incdec.ptr1 = getelementptr inbounds i16, i16* %a.addr.06, i32 1
-  store i16 %ld.b, i16* %a.addr.06, align 2
+  %incdec.ptr = getelementptr inbounds i16, ptr %b.addr.05, i32 1
+  %ld.b = load i16, ptr %b.addr.05, align 2
+  %incdec.ptr1 = getelementptr inbounds i16, ptr %a.addr.06, i32 1
+  store i16 %ld.b, ptr %a.addr.06, align 2
   %count.next = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 1)
   %cmp = icmp ne i32 %count.next, 0
   br i1 %cmp, label %while.body, label %while.end
@@ -442,7 +442,7 @@ while.end:
 ; CHECK-MID:   renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1
 ; CHECK-MID:   tB %bb.2
 ; CHECK-MID: bb.2.while.end:
-define void @check_negated_cmp_wls(i16* nocapture %a, i16* nocapture readonly %b, i32 %N) {
+define void @check_negated_cmp_wls(ptr nocapture %a, ptr nocapture readonly %b, i32 %N) {
 entry:
   %wls = call {i32, i1} @llvm.test.start.loop.iterations.i32(i32 %N)
   %wls0 = extractvalue {i32, i1} %wls, 0
@@ -454,13 +454,13 @@ while.body.preheader:
   br label %while.body
 
 while.body:
-  %a.addr.06 = phi i16* [ %incdec.ptr1, %while.body ], [ %a, %while.body.preheader ]
-  %b.addr.05 = phi i16* [ %incdec.ptr, %while.body ], [ %b, %while.body.preheader ]
+  %a.addr.06 = phi ptr [ %incdec.ptr1, %while.body ], [ %a, %while.body.preheader ]
+  %b.addr.05 = phi ptr [ %incdec.ptr, %while.body ], [ %b, %while.body.preheader ]
   %count = phi i32 [ %wls0, %while.body.preheader ], [ %count.next, %while.body ]
-  %incdec.ptr = getelementptr inbounds i16, i16* %b.addr.05, i32 1
-  %ld.b = load i16, i16* %b.addr.05, align 2
-  %incdec.ptr1 = getelementptr inbounds i16, i16* %a.addr.06, i32 1
-  store i16 %ld.b, i16* %a.addr.06, align 2
+  %incdec.ptr = getelementptr inbounds i16, ptr %b.addr.05, i32 1
+  %ld.b = load i16, ptr %b.addr.05, align 2
+  %incdec.ptr1 = getelementptr inbounds i16, ptr %a.addr.06, i32 1
+  store i16 %ld.b, ptr %a.addr.06, align 2
   %count.next = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 1)
   %cmp.1 = icmp ne i32 %count.next, 0
   br i1 %cmp.1, label %while.body, label %while.end
@@ -476,7 +476,7 @@ while.end:
 ; CHECK-MID:   renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1
 ; CHECK-MID:   tB %bb.2
 ; CHECK-MID: bb.2.while.end:
-define void @check_negated_reordered_wls(i16* nocapture %a, i16* nocapture readonly %b, i32 %N) {
+define void @check_negated_reordered_wls(ptr nocapture %a, ptr nocapture readonly %b, i32 %N) {
 entry:
   br label %while
 
@@ -484,13 +484,13 @@ while.body.preheader:
   br label %while.body
 
 while.body:
-  %a.addr.06 = phi i16* [ %incdec.ptr1, %while.body ], [ %a, %while.body.preheader ]
-  %b.addr.05 = phi i16* [ %incdec.ptr, %while.body ], [ %b, %while.body.preheader ]
+  %a.addr.06 = phi ptr [ %incdec.ptr1, %while.body ], [ %a, %while.body.preheader ]
+  %b.addr.05 = phi ptr [ %incdec.ptr, %while.body ], [ %b, %while.body.preheader ]
   %count = phi i32 [ %wls0, %while.body.preheader ], [ %count.next, %while.body ]
-  %incdec.ptr = getelementptr inbounds i16, i16* %b.addr.05, i32 1
-  %ld.b = load i16, i16* %b.addr.05, align 2
-  %incdec.ptr1 = getelementptr inbounds i16, i16* %a.addr.06, i32 1
-  store i16 %ld.b, i16* %a.addr.06, align 2
+  %incdec.ptr = getelementptr inbounds i16, ptr %b.addr.05, i32 1
+  %ld.b = load i16, ptr %b.addr.05, align 2
+  %incdec.ptr1 = getelementptr inbounds i16, ptr %a.addr.06, i32 1
+  store i16 %ld.b, ptr %a.addr.06, align 2
   %count.next = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 1)
   %cmp = icmp ne i32 %count.next, 0
   br i1 %cmp, label %while.body, label %while.end

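For readers skimming the hunks above: every change there is the same mechanical rewrite, replacing a typed pointer ("i32*", "i16*") with the opaque "ptr", while the element type survives only in the instructions that index or dereference the pointer. A minimal sketch of the before/after shape (value names here are illustrative, not taken from any one test):

  ; typed pointers (before)
  %iv = phi i32* [ %base, %preheader ], [ %iv.next, %body ]
  %iv.next = getelementptr i32, i32* %iv, i32 1
  store i32 %val, i32* %iv.next, align 4

  ; opaque pointers (after): only the gep and the store still name i32
  %iv = phi ptr [ %base, %preheader ], [ %iv.next, %body ]
  %iv.next = getelementptr i32, ptr %iv, i32 1
  store i32 %val, ptr %iv.next, align 4

Because the rewrite is type-only, the generated code is unchanged, which is why the CHECK-MID expectations above are untouched.
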
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/clear-maskedinsts.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/clear-maskedinsts.ll
index 81e1a3c3c5561..9621897c9510f 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/clear-maskedinsts.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/clear-maskedinsts.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -mtriple=thumbv8.1m.main -mattr=+mve.fp -mve-tail-predication -tail-predication=enabled %s -S -o - | FileCheck %s
 
-define hidden i32 @_Z4loopPiPjiS0_i(i32* noalias nocapture readonly %s1, i32* noalias nocapture readonly %s2, i32 %x, i32* noalias nocapture %d, i32 %n) {
+define hidden i32 @_Z4loopPiPjiS0_i(ptr noalias nocapture readonly %s1, ptr noalias nocapture readonly %s2, i32 %x, ptr noalias nocapture %d, i32 %n) {
 ; CHECK-LABEL: @_Z4loopPiPjiS0_i(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP63:%.*]] = icmp sgt i32 [[N:%.*]], 0
@@ -25,11 +25,10 @@ define hidden i32 @_Z4loopPiPjiS0_i(i32* noalias nocapture readonly %s1, i32* no
 ; CHECK-NEXT:    [[START2:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP3]])
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:
-; CHECK-NEXT:    [[LSR_IV9:%.*]] = phi i32* [ [[SCEVGEP10:%.*]], [[VECTOR_BODY]] ], [ [[D:%.*]], [[VECTOR_PH]] ]
+; CHECK-NEXT:    [[LSR_IV9:%.*]] = phi ptr [ [[SCEVGEP10:%.*]], [[VECTOR_BODY]] ], [ [[D:%.*]], [[VECTOR_PH]] ]
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP4:%.*]] = phi i32 [ [[START2]], [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP5:%.*]] = phi i32 [ [[N]], [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[LSR_IV911:%.*]] = bitcast i32* [[LSR_IV9]] to <4 x i32>*
 ; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0
 ; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3>
@@ -37,35 +36,32 @@ define hidden i32 @_Z4loopPiPjiS0_i(i32* noalias nocapture readonly %s1, i32* no
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> undef, <4 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP5]])
 ; CHECK-NEXT:    [[TMP9]] = sub i32 [[TMP5]], 4
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[BROADCAST_SPLAT72]], <4 x i32>* [[LSR_IV911]], i32 4, <4 x i1> [[TMP8]])
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[BROADCAST_SPLAT72]], ptr [[LSR_IV9]], i32 4, <4 x i1> [[TMP8]])
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
-; CHECK-NEXT:    [[SCEVGEP10]] = getelementptr i32, i32* [[LSR_IV9]], i32 4
+; CHECK-NEXT:    [[SCEVGEP10]] = getelementptr i32, ptr [[LSR_IV9]], i32 4
 ; CHECK-NEXT:    [[TMP10]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP4]], i32 1)
 ; CHECK-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
 ; CHECK-NEXT:    br i1 [[TMP11]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP]]
 ; CHECK:       vector.body75:
-; CHECK-NEXT:    [[LSR_IV6:%.*]] = phi i32* [ [[S1:%.*]], [[VECTOR_BODY75_PREHEADER]] ], [ [[SCEVGEP7:%.*]], [[VECTOR_BODY75]] ]
-; CHECK-NEXT:    [[LSR_IV3:%.*]] = phi i32* [ [[S2:%.*]], [[VECTOR_BODY75_PREHEADER]] ], [ [[SCEVGEP4:%.*]], [[VECTOR_BODY75]] ]
-; CHECK-NEXT:    [[LSR_IV:%.*]] = phi i32* [ [[D]], [[VECTOR_BODY75_PREHEADER]] ], [ [[SCEVGEP:%.*]], [[VECTOR_BODY75]] ]
+; CHECK-NEXT:    [[LSR_IV6:%.*]] = phi ptr [ [[S1:%.*]], [[VECTOR_BODY75_PREHEADER]] ], [ [[SCEVGEP7:%.*]], [[VECTOR_BODY75]] ]
+; CHECK-NEXT:    [[LSR_IV3:%.*]] = phi ptr [ [[S2:%.*]], [[VECTOR_BODY75_PREHEADER]] ], [ [[SCEVGEP4:%.*]], [[VECTOR_BODY75]] ]
+; CHECK-NEXT:    [[LSR_IV:%.*]] = phi ptr [ [[D]], [[VECTOR_BODY75_PREHEADER]] ], [ [[SCEVGEP:%.*]], [[VECTOR_BODY75]] ]
 ; CHECK-NEXT:    [[INDEX80:%.*]] = phi i32 [ [[INDEX_NEXT81:%.*]], [[VECTOR_BODY75]] ], [ 0, [[VECTOR_BODY75_PREHEADER]] ]
 ; CHECK-NEXT:    [[TMP12:%.*]] = phi i32 [ [[START1]], [[VECTOR_BODY75_PREHEADER]] ], [ [[TMP17:%.*]], [[VECTOR_BODY75]] ]
-; CHECK-NEXT:    [[LSR_IV68:%.*]] = bitcast i32* [[LSR_IV6]] to <4 x i32>*
-; CHECK-NEXT:    [[LSR_IV35:%.*]] = bitcast i32* [[LSR_IV3]] to <4 x i32>*
-; CHECK-NEXT:    [[LSR_IV2:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>*
 ; CHECK-NEXT:    [[BROADCAST_SPLATINSERT84:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX80]], i32 0
 ; CHECK-NEXT:    [[BROADCAST_SPLAT85:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT84]], <4 x i32> undef, <4 x i32> zeroinitializer
 ; CHECK-NEXT:    [[INDUCTION86:%.*]] = add <4 x i32> [[BROADCAST_SPLAT85]], <i32 0, i32 1, i32 2, i32 3>
 ; CHECK-NEXT:    [[TMP13:%.*]] = insertelement <4 x i32> undef, i32 [[TRIP_COUNT_MINUS_183]], i32 0
 ; CHECK-NEXT:    [[TMP14:%.*]] = shufflevector <4 x i32> [[TMP13]], <4 x i32> undef, <4 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP15:%.*]] = icmp ule <4 x i32> [[INDUCTION86]], [[TMP14]]
-; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV68]], i32 4, <4 x i1> [[TMP15]], <4 x i32> undef)
-; CHECK-NEXT:    [[WIDE_MASKED_LOAD89:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV35]], i32 4, <4 x i1> [[TMP15]], <4 x i32> undef)
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV6]], i32 4, <4 x i1> [[TMP15]], <4 x i32> undef)
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD89:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV3]], i32 4, <4 x i1> [[TMP15]], <4 x i32> undef)
 ; CHECK-NEXT:    [[TMP16:%.*]] = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> [[WIDE_MASKED_LOAD89]], <4 x i32> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP16]], <4 x i32>* [[LSR_IV2]], i32 4, <4 x i1> [[TMP15]])
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[TMP16]], ptr [[LSR_IV]], i32 4, <4 x i1> [[TMP15]])
 ; CHECK-NEXT:    [[INDEX_NEXT81]] = add i32 [[INDEX80]], 4
-; CHECK-NEXT:    [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4
-; CHECK-NEXT:    [[SCEVGEP4]] = getelementptr i32, i32* [[LSR_IV3]], i32 4
-; CHECK-NEXT:    [[SCEVGEP7]] = getelementptr i32, i32* [[LSR_IV6]], i32 4
+; CHECK-NEXT:    [[SCEVGEP]] = getelementptr i32, ptr [[LSR_IV]], i32 4
+; CHECK-NEXT:    [[SCEVGEP4]] = getelementptr i32, ptr [[LSR_IV3]], i32 4
+; CHECK-NEXT:    [[SCEVGEP7]] = getelementptr i32, ptr [[LSR_IV6]], i32 4
 ; CHECK-NEXT:    [[TMP17]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP12]], i32 1)
 ; CHECK-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
 ; CHECK-NEXT:    br i1 [[TMP18]], label [[VECTOR_BODY75]], label [[FOR_COND_CLEANUP]]
@@ -98,46 +94,42 @@ vector.ph:                                        ; preds = %for.body.lr.ph
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
-  %lsr.iv9 = phi i32* [ %scevgep10, %vector.body ], [ %d, %vector.ph ]
+  %lsr.iv9 = phi ptr [ %scevgep10, %vector.body ], [ %d, %vector.ph ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %4 = phi i32 [ %start2, %vector.ph ], [ %8, %vector.body ]
-  %lsr.iv911 = bitcast i32* %lsr.iv9 to <4 x i32>*
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
   %5 = insertelement <4 x i32> undef, i32 %trip.count.minus.183, i32 0
   %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <4 x i32> zeroinitializer
   %7 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %broadcast.splat72, <4 x i32>* %lsr.iv911, i32 4, <4 x i1> %7)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %broadcast.splat72, ptr %lsr.iv9, i32 4, <4 x i1> %7)
   %index.next = add i32 %index, 4
-  %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 4
+  %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 4
   %8 = call i32 @llvm.loop.decrement.reg.i32(i32 %4, i32 1)
   %9 = icmp ne i32 %8, 0
   br i1 %9, label %vector.body, label %for.cond.cleanup
 
 vector.body75:                                    ; preds = %vector.body75, %vector.body75.preheader
-  %lsr.iv6 = phi i32* [ %s1, %vector.body75.preheader ], [ %scevgep7, %vector.body75 ]
-  %lsr.iv3 = phi i32* [ %s2, %vector.body75.preheader ], [ %scevgep4, %vector.body75 ]
-  %lsr.iv = phi i32* [ %d, %vector.body75.preheader ], [ %scevgep, %vector.body75 ]
+  %lsr.iv6 = phi ptr [ %s1, %vector.body75.preheader ], [ %scevgep7, %vector.body75 ]
+  %lsr.iv3 = phi ptr [ %s2, %vector.body75.preheader ], [ %scevgep4, %vector.body75 ]
+  %lsr.iv = phi ptr [ %d, %vector.body75.preheader ], [ %scevgep, %vector.body75 ]
   %index80 = phi i32 [ %index.next81, %vector.body75 ], [ 0, %vector.body75.preheader ]
   %10 = phi i32 [ %start1, %vector.body75.preheader ], [ %15, %vector.body75 ]
-  %lsr.iv68 = bitcast i32* %lsr.iv6 to <4 x i32>*
-  %lsr.iv35 = bitcast i32* %lsr.iv3 to <4 x i32>*
-  %lsr.iv2 = bitcast i32* %lsr.iv to <4 x i32>*
   %broadcast.splatinsert84 = insertelement <4 x i32> undef, i32 %index80, i32 0
   %broadcast.splat85 = shufflevector <4 x i32> %broadcast.splatinsert84, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction86 = add <4 x i32> %broadcast.splat85, <i32 0, i32 1, i32 2, i32 3>
   %11 = insertelement <4 x i32> undef, i32 %trip.count.minus.183, i32 0
   %12 = shufflevector <4 x i32> %11, <4 x i32> undef, <4 x i32> zeroinitializer
   %13 = icmp ule <4 x i32> %induction86, %12
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv68, i32 4, <4 x i1> %13, <4 x i32> undef)
-  %wide.masked.load89 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv35, i32 4, <4 x i1> %13, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv6, i32 4, <4 x i1> %13, <4 x i32> undef)
+  %wide.masked.load89 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv3, i32 4, <4 x i1> %13, <4 x i32> undef)
   %14 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %wide.masked.load89, <4 x i32> %wide.masked.load)
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %14, <4 x i32>* %lsr.iv2, i32 4, <4 x i1> %13)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %14, ptr %lsr.iv, i32 4, <4 x i1> %13)
   %index.next81 = add i32 %index80, 4
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep4 = getelementptr i32, i32* %lsr.iv3, i32 4
-  %scevgep7 = getelementptr i32, i32* %lsr.iv6, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep4 = getelementptr i32, ptr %lsr.iv3, i32 4
+  %scevgep7 = getelementptr i32, ptr %lsr.iv6, i32 4
   %15 = call i32 @llvm.loop.decrement.reg.i32(i32 %10, i32 1)
   %16 = icmp ne i32 %15, 0
   br i1 %16, label %vector.body75, label %for.cond.cleanup
@@ -145,8 +137,8 @@ vector.body75:                                    ; preds = %vector.body75, %vec
 for.cond.cleanup:                                 ; preds = %vector.body, %vector.body75, %entry
   ret i32 0
 }
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
 declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
 declare i32 @llvm.start.loop.iterations.i32(i32)
 declare i32 @llvm.loop.decrement.reg.i32(i32, i32)

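clear-maskedinsts.ll shows the two knock-on effects of the conversion beyond the plain type rewrite: pointer-to-pointer bitcasts become no-ops and are deleted outright, and the overloaded masked load/store intrinsics drop the pointee component of their name mangling ("...v4i32.p0v4i32" becomes "...v4i32.p0"). A reduced sketch with illustrative names:

  ; before: the intrinsic takes a vector-typed pointer, so a bitcast
  ; feeds it, and the suffix p0v4i32 encodes the pointee type
  %cast = bitcast i32* %p to <4 x i32>*
  %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %cast, i32 4, <4 x i1> %m, <4 x i32> undef)

  ; after: the cast disappears and the suffix only records the
  ; (now opaque) pointer type
  %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4, <4 x i1> %m, <4 x i32> undef)

The autogenerated CHECK lines shrink accordingly, which is why captures such as [[LSR_IV911]] vanish from the expected output above.
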
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll
index cb773718f7f5a..9346098f0371b 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -tail-predication=enabled --verify-machineinstrs %s -o - | FileCheck %s
 
-define dso_local i32 @vpsel_mul_reduce_add(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture readonly %c, i32 %N) {
+define dso_local i32 @vpsel_mul_reduce_add(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture readonly %c, i32 %N) {
 ; CHECK-LABEL: vpsel_mul_reduce_add:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #0
@@ -52,16 +52,13 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %add, %vector.body ]
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp5 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp6 = bitcast i32* %tmp5 to <4 x i32>*
-  %wide.masked.load.c = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp6, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp5 = getelementptr inbounds i32, ptr %c, i32 %index
+  %wide.masked.load.c = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp5, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %rem = urem i32 %index, 16
   %rem.broadcast.splatinsert = insertelement <4 x i32> undef, i32 %rem, i32 0
   %rem.broadcast.splat = shufflevector <4 x i32> %rem.broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -83,7 +80,7 @@ for.cond.cleanup:                                 ; preds = %middle.block, %entr
   ret i32 %res.0.lcssa
 }
 
-define dso_local i32 @vpsel_mul_reduce_add_2(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b,
+define dso_local i32 @vpsel_mul_reduce_add_2(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b,
 ; CHECK-LABEL: vpsel_mul_reduce_add_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r4, r5, r7, lr}
@@ -128,7 +125,7 @@ define dso_local i32 @vpsel_mul_reduce_add_2(i32* noalias nocapture readonly %a,
 ; CHECK-NEXT:    movs r0, #0
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
-                                         i32* noalias nocapture readonly %c, i32* noalias nocapture readonly %d, i32 %N) {
+                                         ptr noalias nocapture readonly %c, ptr noalias nocapture readonly %d, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   br i1 %cmp8, label %for.cond.cleanup, label %vector.ph
@@ -141,19 +138,15 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %add, %vector.body ]
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp5 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp6 = bitcast i32* %tmp5 to <4 x i32>*
-  %wide.masked.load.c = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp6, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp7 = getelementptr inbounds i32, i32* %d, i32 %index
-  %tmp8 = bitcast i32* %tmp7 to <4 x i32>*
-  %wide.masked.load.d = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp8, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp5 = getelementptr inbounds i32, ptr %c, i32 %index
+  %wide.masked.load.c = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp5, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp7 = getelementptr inbounds i32, ptr %d, i32 %index
+  %wide.masked.load.d = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp7, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %sub = sub <4 x i32> %wide.masked.load.c, %wide.masked.load.d
   %rem = urem i32 %index, 16
   %rem.broadcast.splatinsert = insertelement <4 x i32> undef, i32 %rem, i32 0
@@ -176,7 +169,7 @@ for.cond.cleanup:                                 ; preds = %middle.block, %entr
   ret i32 %res.0.lcssa
 }
 
-define dso_local i32 @and_mul_reduce_add(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b,
+define dso_local i32 @and_mul_reduce_add(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b,
 ; CHECK-LABEL: and_mul_reduce_add:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r4, lr}
@@ -219,7 +212,7 @@ define dso_local i32 @and_mul_reduce_add(i32* noalias nocapture readonly %a, i32
 ; CHECK-NEXT:    movs r0, #0
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop {r4, pc}
-                                         i32* noalias nocapture readonly %c, i32* noalias nocapture readonly %d, i32 %N) {
+                                         ptr noalias nocapture readonly %c, ptr noalias nocapture readonly %d, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   br i1 %cmp8, label %for.cond.cleanup, label %vector.ph
@@ -232,22 +225,18 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %add, %vector.body ]
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %sub = sub <4 x i32> %wide.masked.load.a, %wide.masked.load.b
   %cmp = icmp eq <4 x i32> %sub, <i32 0, i32 0, i32 0, i32 0>
   %mask = and <4 x i1> %cmp, %tmp1
-  %tmp5 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp6 = bitcast i32* %tmp5 to <4 x i32>*
-  %wide.masked.load.c = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp6, i32 4, <4 x i1> %mask, <4 x i32> undef)
-  %tmp7 = getelementptr inbounds i32, i32* %d, i32 %index
-  %tmp8 = bitcast i32* %tmp7 to <4 x i32>*
-  %wide.masked.load.d = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp8, i32 4, <4 x i1> %mask, <4 x i32> undef)
+  %tmp5 = getelementptr inbounds i32, ptr %c, i32 %index
+  %wide.masked.load.c = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp5, i32 4, <4 x i1> %mask, <4 x i32> undef)
+  %tmp7 = getelementptr inbounds i32, ptr %d, i32 %index
+  %wide.masked.load.d = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp7, i32 4, <4 x i1> %mask, <4 x i32> undef)
   %mul = mul <4 x i32> %wide.masked.load.c, %wide.masked.load.d
   %add = add <4 x i32> %mul, %vec.phi
   %index.next = add i32 %index, 4
@@ -264,7 +253,7 @@ for.cond.cleanup:                                 ; preds = %middle.block, %entr
   ret i32 %res.0.lcssa
 }
 
-define dso_local i32 @or_mul_reduce_add(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture readonly %c, i32* noalias nocapture readonly %d, i32 %N) {
+define dso_local i32 @or_mul_reduce_add(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture readonly %c, ptr noalias nocapture readonly %d, i32 %N) {
 ; CHECK-LABEL: or_mul_reduce_add:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r4, lr}
@@ -320,22 +309,18 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %add, %vector.body ]
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %sub = sub <4 x i32> %wide.masked.load.a, %wide.masked.load.b
   %cmp = icmp eq <4 x i32> %sub, <i32 0, i32 0, i32 0, i32 0>
   %mask = or <4 x i1> %cmp, %tmp1
-  %tmp5 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp6 = bitcast i32* %tmp5 to <4 x i32>*
-  %wide.masked.load.c = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp6, i32 4, <4 x i1> %mask, <4 x i32> undef)
-  %tmp7 = getelementptr inbounds i32, i32* %d, i32 %index
-  %tmp8 = bitcast i32* %tmp7 to <4 x i32>*
-  %wide.masked.load.d = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp8, i32 4, <4 x i1> %mask, <4 x i32> undef)
+  %tmp5 = getelementptr inbounds i32, ptr %c, i32 %index
+  %wide.masked.load.c = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp5, i32 4, <4 x i1> %mask, <4 x i32> undef)
+  %tmp7 = getelementptr inbounds i32, ptr %d, i32 %index
+  %wide.masked.load.d = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp7, i32 4, <4 x i1> %mask, <4 x i32> undef)
   %mul = mul <4 x i32> %wide.masked.load.c, %wide.masked.load.d
   %add = add <4 x i32> %mul, %vec.phi
   %index.next = add i32 %index, 4
@@ -352,7 +337,7 @@ for.cond.cleanup:                                 ; preds = %middle.block, %entr
   ret i32 %res.0.lcssa
 }
 
-define dso_local void @continue_on_zero(i32* noalias nocapture %arg, i32* noalias nocapture readonly %arg1, i32 %arg2) {
+define dso_local void @continue_on_zero(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1, i32 %arg2) {
 ; CHECK-LABEL: continue_on_zero:
 ; CHECK:       @ %bb.0: @ %bb
 ; CHECK-NEXT:    push {r7, lr}
@@ -383,18 +368,15 @@ bb3:                                              ; preds = %bb
 
 bb9:                                              ; preds = %bb9, %bb3
   %tmp10 = phi i32 [ 0, %bb3 ], [ %tmp25, %bb9 ]
-  %tmp14 = getelementptr inbounds i32, i32* %arg1, i32 %tmp10
+  %tmp14 = getelementptr inbounds i32, ptr %arg1, i32 %tmp10
   %tmp15 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %tmp10, i32 %arg2)
-  %tmp16 = bitcast i32* %tmp14 to <4 x i32>*
-  %tmp17 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp16, i32 4, <4 x i1> %tmp15, <4 x i32> undef)
+  %tmp17 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp14, i32 4, <4 x i1> %tmp15, <4 x i32> undef)
   %tmp18 = icmp ne <4 x i32> %tmp17, zeroinitializer
-  %tmp19 = getelementptr inbounds i32, i32* %arg, i32 %tmp10
+  %tmp19 = getelementptr inbounds i32, ptr %arg, i32 %tmp10
   %tmp20 = and <4 x i1> %tmp18, %tmp15
-  %tmp21 = bitcast i32* %tmp19 to <4 x i32>*
-  %tmp22 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp21, i32 4, <4 x i1> %tmp20, <4 x i32> undef)
+  %tmp22 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp19, i32 4, <4 x i1> %tmp20, <4 x i32> undef)
   %tmp23 = mul nsw <4 x i32> %tmp22, %tmp17
-  %tmp24 = bitcast i32* %tmp19 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp23, <4 x i32>* %tmp24, i32 4, <4 x i1> %tmp20)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp23, ptr %tmp19, i32 4, <4 x i1> %tmp20)
   %tmp25 = add i32 %tmp10, 4
   %tmp26 = icmp eq i32 %tmp25, %tmp5
   br i1 %tmp26, label %bb27, label %bb9
@@ -403,7 +385,7 @@ bb27:                                             ; preds = %bb9, %bb
   ret void
 }
 
-define dso_local arm_aapcs_vfpcc void @range_test(i32* noalias nocapture %arg, i32* noalias nocapture readonly %arg1, i32 %arg2, i32 %arg3) {
+define dso_local arm_aapcs_vfpcc void @range_test(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: range_test:
 ; CHECK:       @ %bb.0: @ %bb
 ; CHECK-NEXT:    push {r7, lr}
@@ -438,20 +420,17 @@ bb4:                                              ; preds = %bb
 
 bb12:                                             ; preds = %bb12, %bb4
   %tmp13 = phi i32 [ 0, %bb4 ], [ %tmp30, %bb12 ]
-  %tmp17 = getelementptr inbounds i32, i32* %arg, i32 %tmp13
+  %tmp17 = getelementptr inbounds i32, ptr %arg, i32 %tmp13
   %tmp18 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %tmp13, i32 %arg3)
-  %tmp19 = bitcast i32* %tmp17 to <4 x i32>*
-  %tmp20 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp19, i32 4, <4 x i1> %tmp18, <4 x i32> undef)
+  %tmp20 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp17, i32 4, <4 x i1> %tmp18, <4 x i32> undef)
   %tmp21 = icmp ne <4 x i32> %tmp20, zeroinitializer
   %tmp22 = icmp sle <4 x i32> %tmp20, %tmp11
-  %tmp23 = getelementptr inbounds i32, i32* %arg1, i32 %tmp13
+  %tmp23 = getelementptr inbounds i32, ptr %arg1, i32 %tmp13
   %tmp24 = and <4 x i1> %tmp22, %tmp21
   %tmp25 = and <4 x i1> %tmp24, %tmp18
-  %tmp26 = bitcast i32* %tmp23 to <4 x i32>*
-  %tmp27 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp26, i32 4, <4 x i1> %tmp25, <4 x i32> undef)
+  %tmp27 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp23, i32 4, <4 x i1> %tmp25, <4 x i32> undef)
   %tmp28 = mul nsw <4 x i32> %tmp27, %tmp20
-  %tmp29 = bitcast i32* %tmp17 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp28, <4 x i32>* %tmp29, i32 4, <4 x i1> %tmp25)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp28, ptr %tmp17, i32 4, <4 x i1> %tmp25)
   %tmp30 = add i32 %tmp13, 4
   %tmp31 = icmp eq i32 %tmp30, %tmp6
   br i1 %tmp31, label %bb32, label %bb12
@@ -461,8 +440,8 @@ bb32:                                             ; preds = %bb12, %bb
 }
 
 ; Function Attrs: argmemonly nounwind readonly willreturn
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)
 
 ; Function Attrs: nounwind readnone willreturn
 declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)

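The same pattern applies in this file, with one detail worth noting: each deleted bitcast (%tmp2, %tmp4, %tmp6, %tmp8) had the masked intrinsic as its only user, so the value is removed rather than rewritten and the call takes the getelementptr result directly. Taken from the first hunk of this file:

  ; before
  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
  %tmp2 = bitcast i32* %tmp to <4 x i32>*
  %wide.masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)

  ; after
  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
  %wide.masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)

Since the surviving values are all named, the hand-written IR in this file needs no renumbering.
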
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/exitcount.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/exitcount.ll
index 548871b85b023..b9a80af649f29 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/exitcount.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/exitcount.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs -tail-predication=enabled -o - %s | FileCheck %s
-%struct.SpeexPreprocessState_ = type { i32, i32, half*, half* }
+%struct.SpeexPreprocessState_ = type { i32, i32, ptr, ptr }
 
-define void @foo(%struct.SpeexPreprocessState_* nocapture readonly %st, i16* %x) {
+define void @foo(ptr nocapture readonly %st, ptr %x) {
 ; CHECK-LABEL: foo:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -32,52 +32,47 @@ define void @foo(%struct.SpeexPreprocessState_* nocapture readonly %st, i16* %x)
 ; CHECK-NEXT:  @ %bb.4: @ %do.end13
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
-  %ps_size = getelementptr inbounds %struct.SpeexPreprocessState_, %struct.SpeexPreprocessState_* %st, i32 0, i32 1
-  %0 = load i32, i32* %ps_size, align 4
+  %ps_size = getelementptr inbounds %struct.SpeexPreprocessState_, ptr %st, i32 0, i32 1
+  %0 = load i32, ptr %ps_size, align 4
   %mul = shl nsw i32 %0, 1
-  %frame_size = getelementptr inbounds %struct.SpeexPreprocessState_, %struct.SpeexPreprocessState_* %st, i32 0, i32 0
-  %1 = load i32, i32* %frame_size, align 4
+  %1 = load i32, ptr %st, align 4
   %sub = sub nsw i32 %mul, %1
-  %inbuf = getelementptr inbounds %struct.SpeexPreprocessState_, %struct.SpeexPreprocessState_* %st, i32 0, i32 3
-  %2 = load half*, half** %inbuf, align 4
-  %frame = getelementptr inbounds %struct.SpeexPreprocessState_, %struct.SpeexPreprocessState_* %st, i32 0, i32 2
-  %3 = load half*, half** %frame, align 4
+  %inbuf = getelementptr inbounds %struct.SpeexPreprocessState_, ptr %st, i32 0, i32 3
+  %2 = load ptr, ptr %inbuf, align 4
+  %frame = getelementptr inbounds %struct.SpeexPreprocessState_, ptr %st, i32 0, i32 2
+  %3 = load ptr, ptr %frame, align 4
   br label %do.body
 
 do.body:                                          ; preds = %do.body, %entry
-  %pinbuff16.0 = phi half* [ %2, %entry ], [ %add.ptr, %do.body ]
+  %pinbuff16.0 = phi ptr [ %2, %entry ], [ %add.ptr, %do.body ]
   %blkCnt.0 = phi i32 [ %sub, %entry ], [ %sub2, %do.body ]
-  %pframef16.0 = phi half* [ %3, %entry ], [ %add.ptr1, %do.body ]
+  %pframef16.0 = phi ptr [ %3, %entry ], [ %add.ptr1, %do.body ]
   %4 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %blkCnt.0)
-  %5 = bitcast half* %pinbuff16.0 to <8 x half>*
-  %6 = tail call fast <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %5, i32 2, <8 x i1> %4, <8 x half> zeroinitializer)
-  %7 = bitcast half* %pframef16.0 to <8 x half>*
-  tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %6, <8 x half>* %7, i32 2, <8 x i1> %4)
-  %add.ptr = getelementptr inbounds half, half* %pinbuff16.0, i32 8
-  %add.ptr1 = getelementptr inbounds half, half* %pframef16.0, i32 8
+  %5 = tail call fast <8 x half> @llvm.masked.load.v8f16.p0(ptr %pinbuff16.0, i32 2, <8 x i1> %4, <8 x half> zeroinitializer)
+  tail call void @llvm.masked.store.v8f16.p0(<8 x half> %5, ptr %pframef16.0, i32 2, <8 x i1> %4)
+  %add.ptr = getelementptr inbounds half, ptr %pinbuff16.0, i32 8
+  %add.ptr1 = getelementptr inbounds half, ptr %pframef16.0, i32 8
   %sub2 = add nsw i32 %blkCnt.0, -8
   %cmp = icmp sgt i32 %blkCnt.0, 8
   br i1 %cmp, label %do.body, label %do.end
 
 do.end:                                           ; preds = %do.body
-  %8 = load half*, half** %frame, align 4
-  %add.ptr4 = getelementptr inbounds half, half* %8, i32 %sub
-  %9 = load i32, i32* %frame_size, align 4
+  %6 = load ptr, ptr %frame, align 4
+  %add.ptr4 = getelementptr inbounds half, ptr %6, i32 %sub
+  %7 = load i32, ptr %st, align 4
   br label %do.body6
 
 do.body6:                                         ; preds = %do.body6, %do.end
-  %px.0 = phi i16* [ %x, %do.end ], [ %add.ptr8, %do.body6 ]
-  %blkCnt.1 = phi i32 [ %9, %do.end ], [ %sub10, %do.body6 ]
-  %pframef16.1 = phi half* [ %add.ptr4, %do.end ], [ %add.ptr9, %do.body6 ]
-  %10 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %blkCnt.1)
-  %11 = bitcast i16* %px.0 to <8 x i16>*
-  %12 = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %11, i32 2, <8 x i1> %10, <8 x i16> zeroinitializer)
-  %13 = tail call fast <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16> %12, i32 0, <8 x i1> %10, <8 x half> undef)
-  %14 = tail call fast <8 x half> @llvm.arm.mve.mul.predicated.v8f16.v8i1(<8 x half> %13, <8 x half> <half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800>, <8 x i1> %10, <8 x half> undef)
-  %15 = bitcast half* %pframef16.1 to <8 x half>*
-  tail call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %14, <8 x half>* %15, i32 2, <8 x i1> %10)
-  %add.ptr8 = getelementptr inbounds i16, i16* %px.0, i32 8
-  %add.ptr9 = getelementptr inbounds half, half* %pframef16.1, i32 8
+  %px.0 = phi ptr [ %x, %do.end ], [ %add.ptr8, %do.body6 ]
+  %blkCnt.1 = phi i32 [ %7, %do.end ], [ %sub10, %do.body6 ]
+  %pframef16.1 = phi ptr [ %add.ptr4, %do.end ], [ %add.ptr9, %do.body6 ]
+  %8 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %blkCnt.1)
+  %9 = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %px.0, i32 2, <8 x i1> %8, <8 x i16> zeroinitializer)
+  %10 = tail call fast <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16> %9, i32 0, <8 x i1> %8, <8 x half> undef)
+  %11 = tail call fast <8 x half> @llvm.arm.mve.mul.predicated.v8f16.v8i1(<8 x half> %10, <8 x half> <half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800>, <8 x i1> %8, <8 x half> undef)
+  tail call void @llvm.masked.store.v8f16.p0(<8 x half> %11, ptr %pframef16.1, i32 2, <8 x i1> %8)
+  %add.ptr8 = getelementptr inbounds i16, ptr %px.0, i32 8
+  %add.ptr9 = getelementptr inbounds half, ptr %pframef16.1, i32 8
   %sub10 = add nsw i32 %blkCnt.1, -8
   %cmp12 = icmp sgt i32 %blkCnt.1, 8
   br i1 %cmp12, label %do.body6, label %do.end13
@@ -88,11 +83,11 @@ do.end13:                                         ; preds = %do.body6
 
 declare <8 x i1> @llvm.arm.mve.vctp16(i32)
 
-declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32 immarg, <8 x i1>, <8 x half>)
+declare <8 x half> @llvm.masked.load.v8f16.p0(ptr, i32 immarg, <8 x i1>, <8 x half>)
 
-declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32 immarg, <8 x i1>)
+declare void @llvm.masked.store.v8f16.p0(<8 x half>, ptr, i32 immarg, <8 x i1>)
 
-declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>)
 
 declare <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16>, i32, <8 x i1>, <8 x half>)
 

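exitcount.ll adds two wrinkles. First, pointer members inside struct types also collapse to ptr, so the struct no longer records what its fields point to. Second, a getelementptr that selects field 0 at offset 0 yields the same address as its base and folds away, letting the load go straight through %st. A sketch of both rules together (struct abbreviated):

  ; before
  %struct.S = type { i32, i32, half*, half* }
  %frame_size = getelementptr inbounds %struct.S, %struct.S* %st, i32 0, i32 0
  %n = load i32, i32* %frame_size, align 4

  ; after: the zero-offset gep is gone and the field types are opaque
  %struct.S = type { i32, i32, ptr, ptr }
  %n = load i32, ptr %st, align 4

Deleting the unnamed bitcasts also renumbers every later unnamed value, which accounts for the shifts in the hunks above (%6 becomes %5, %10 becomes %8, and so on).
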
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extending-loads.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extending-loads.ll
index 01564487b576a..f970c903a8639 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extending-loads.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extending-loads.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -tail-predication=enabled %s -o - | FileCheck %s
 
-define dso_local arm_aapcs_vfpcc void @sext_i8(i16* noalias nocapture %a, i8* nocapture readonly %b, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @sext_i8(ptr noalias nocapture %a, ptr nocapture readonly %b, i32 %N) {
 ; CHECK-LABEL: sext_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r7, lr}
@@ -30,27 +30,24 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %b, i32 %index
+  %0 = getelementptr inbounds i8, ptr %b, i32 %index
   %1 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
-  %2 = bitcast i8* %0 to <8 x i8>*
-  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %2, i32 1, <8 x i1> %1, <8 x i8> undef)
-  %3 = sext <8 x i8> %wide.masked.load to <8 x i16>
-  %4 = getelementptr inbounds i16, i16* %a, i32 %index
-  %5 = bitcast i16* %4 to <8 x i16>*
-  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %5, i32 2, <8 x i1> %1, <8 x i16> undef)
-  %6 = add <8 x i16> %wide.masked.load12, %3
-  %7 = bitcast i16* %4 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %6, <8 x i16>* %7, i32 2, <8 x i1> %1)
+  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %0, i32 1, <8 x i1> %1, <8 x i8> undef)
+  %2 = sext <8 x i8> %wide.masked.load to <8 x i16>
+  %3 = getelementptr inbounds i16, ptr %a, i32 %index
+  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %3, i32 2, <8 x i1> %1, <8 x i16> undef)
+  %4 = add <8 x i16> %wide.masked.load12, %2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %4, ptr %3, i32 2, <8 x i1> %1)
   %index.next = add i32 %index, 8
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %for.cond.cleanup, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind
-define dso_local arm_aapcs_vfpcc void @zext_i8(i16* noalias nocapture %a, i8* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+define dso_local arm_aapcs_vfpcc void @zext_i8(ptr noalias nocapture %a, ptr nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
 ; CHECK-LABEL: zext_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r7, lr}
@@ -79,27 +76,24 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %b, i32 %index
+  %0 = getelementptr inbounds i8, ptr %b, i32 %index
   %1 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
-  %2 = bitcast i8* %0 to <8 x i8>*
-  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %2, i32 1, <8 x i1> %1, <8 x i8> undef)
-  %3 = zext <8 x i8> %wide.masked.load to <8 x i16>
-  %4 = getelementptr inbounds i16, i16* %a, i32 %index
-  %5 = bitcast i16* %4 to <8 x i16>*
-  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %5, i32 2, <8 x i1> %1, <8 x i16> undef)
-  %6 = add <8 x i16> %wide.masked.load12, %3
-  %7 = bitcast i16* %4 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %6, <8 x i16>* %7, i32 2, <8 x i1> %1)
+  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %0, i32 1, <8 x i1> %1, <8 x i8> undef)
+  %2 = zext <8 x i8> %wide.masked.load to <8 x i16>
+  %3 = getelementptr inbounds i16, ptr %a, i32 %index
+  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %3, i32 2, <8 x i1> %1, <8 x i16> undef)
+  %4 = add <8 x i16> %wide.masked.load12, %2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %4, ptr %3, i32 2, <8 x i1> %1)
   %index.next = add i32 %index, 8
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %for.cond.cleanup, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind
-define dso_local arm_aapcs_vfpcc void @sext_i16(i32* noalias nocapture %a, i16* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+define dso_local arm_aapcs_vfpcc void @sext_i16(ptr noalias nocapture %a, ptr nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
 ; CHECK-LABEL: sext_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r7, lr}
@@ -128,27 +122,24 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %b, i32 %index
+  %0 = getelementptr inbounds i16, ptr %b, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %2 = bitcast i16* %0 to <4 x i16>*
-  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
-  %3 = sext <4 x i16> %wide.masked.load to <4 x i32>
-  %4 = getelementptr inbounds i32, i32* %a, i32 %index
-  %5 = bitcast i32* %4 to <4 x i32>*
-  %wide.masked.load10 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %5, i32 4, <4 x i1> %1, <4 x i32> undef)
-  %6 = add nsw <4 x i32> %wide.masked.load10, %3
-  %7 = bitcast i32* %4 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %6, <4 x i32>* %7, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %0, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %2 = sext <4 x i16> %wide.masked.load to <4 x i32>
+  %3 = getelementptr inbounds i32, ptr %a, i32 %index
+  %wide.masked.load10 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %3, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %4 = add nsw <4 x i32> %wide.masked.load10, %2
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %4, ptr %3, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %for.cond.cleanup, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
 ; Function Attrs: nofree norecurse nounwind
-define dso_local arm_aapcs_vfpcc void @zext_i16(i32* noalias nocapture %a, i16* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+define dso_local arm_aapcs_vfpcc void @zext_i16(ptr noalias nocapture %a, ptr nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
 ; CHECK-LABEL: zext_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r7, lr}
@@ -177,31 +168,28 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %b, i32 %index
+  %0 = getelementptr inbounds i16, ptr %b, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %2 = bitcast i16* %0 to <4 x i16>*
-  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
-  %3 = zext <4 x i16> %wide.masked.load to <4 x i32>
-  %4 = getelementptr inbounds i32, i32* %a, i32 %index
-  %5 = bitcast i32* %4 to <4 x i32>*
-  %wide.masked.load10 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %5, i32 4, <4 x i1> %1, <4 x i32> undef)
-  %6 = add <4 x i32> %wide.masked.load10, %3
-  %7 = bitcast i32* %4 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %6, <4 x i32>* %7, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %0, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %2 = zext <4 x i16> %wide.masked.load to <4 x i32>
+  %3 = getelementptr inbounds i32, ptr %a, i32 %index
+  %wide.masked.load10 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %3, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %4 = add <4 x i32> %wide.masked.load10, %2
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %4, ptr %3, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %for.cond.cleanup, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
-declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
-declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+declare <8 x i8> @llvm.masked.load.v8i8.p0(ptr, i32 immarg, <8 x i1>, <8 x i8>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>)
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32 immarg, <4 x i1>, <4 x i16>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
 
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
 declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)

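extending-loads.ll highlights a small cleanup the conversion enables: the original IR cast the same address twice, once for the masked load and once for the masked store. Both casts are now no-ops, so a single ptr value serves both accesses, and because the deleted casts were unnamed values, everything after them renumbers (%8 becomes %5 in each function). From the sext_i8 hunk:

  ; before: %5 and %7 are two identical casts of %4
  %4 = getelementptr inbounds i16, i16* %a, i32 %index
  %5 = bitcast i16* %4 to <8 x i16>*
  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %5, i32 2, <8 x i1> %1, <8 x i16> undef)
  %6 = add <8 x i16> %wide.masked.load12, %3
  %7 = bitcast i16* %4 to <8 x i16>*
  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %6, <8 x i16>* %7, i32 2, <8 x i1> %1)

  ; after: one gep feeds both the load and the store
  %3 = getelementptr inbounds i16, ptr %a, i32 %index
  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %3, i32 2, <8 x i1> %1, <8 x i16> undef)
  %4 = add <8 x i16> %wide.masked.load12, %2
  call void @llvm.masked.store.v8i16.p0(<8 x i16> %4, ptr %3, i32 2, <8 x i1> %1)
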
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inlineasm.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inlineasm.ll
index 6e3499bda4cb7..d67e66d7a7131 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inlineasm.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inlineasm.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs -o - %s | FileCheck %s
 
-define i32 @test(i16* nocapture readonly %x, i16* nocapture readonly %y, i32 %n) {
+define i32 @test(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) {
 ; CHECK-LABEL: test:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -37,10 +37,10 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
 for.body:                                         ; preds = %entry, %for.body
   %s.011 = phi i32 [ %add, %for.body ], [ 0, %entry ]
   %i.010 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
-  %arrayidx = getelementptr inbounds i16, i16* %x, i32 %i.010
-  %0 = load i16, i16* %arrayidx, align 2
-  %arrayidx1 = getelementptr inbounds i16, i16* %y, i32 %i.010
-  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.010
+  %0 = load i16, ptr %arrayidx, align 2
+  %arrayidx1 = getelementptr inbounds i16, ptr %y, i32 %i.010
+  %1 = load i16, ptr %arrayidx1, align 2
   %2 = tail call i32 asm "add $0, $1, $2", "=r,r,r"(i16 %0, i16 %1) #1
   %add = add nsw i32 %2, %s.011
   %inc = add nuw nsw i32 %i.010, 1
@@ -48,7 +48,7 @@ for.body:                                         ; preds = %entry, %for.body
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
 }
 
-define i32 @testlr(i16* nocapture readonly %x, i16* nocapture readonly %y, i32 %n) {
+define i32 @testlr(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) {
 ; CHECK-LABEL: testlr:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -84,10 +84,10 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
 for.body:                                         ; preds = %entry, %for.body
   %s.011 = phi i32 [ %add, %for.body ], [ 0, %entry ]
   %i.010 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
-  %arrayidx = getelementptr inbounds i16, i16* %x, i32 %i.010
-  %0 = load i16, i16* %arrayidx, align 2
-  %arrayidx1 = getelementptr inbounds i16, i16* %y, i32 %i.010
-  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.010
+  %0 = load i16, ptr %arrayidx, align 2
+  %arrayidx1 = getelementptr inbounds i16, ptr %y, i32 %i.010
+  %1 = load i16, ptr %arrayidx1, align 2
   %2 = tail call i32 asm "add $0, $1, $2", "=r,r,r,~{lr}"(i16 %0, i16 %1) #1
   %add = add nsw i32 %2, %s.011
   %inc = add nuw nsw i32 %i.010, 1

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-guards.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-guards.ll
index 1098038d53e3f..342b07e2a19d5 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-guards.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-guards.ll
@@ -21,7 +21,7 @@
 ; CHECK-NOT:   $lr = t2DLS killed renamable $lr
 ; CHECK: bb.3.while.body:
 ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.3
-define void @ne_and_guard(i1 zeroext %t1, i1 zeroext %t2, i32* nocapture %a, i32* nocapture readonly %b, i32 %N) {
+define void @ne_and_guard(i1 zeroext %t1, i1 zeroext %t2, ptr nocapture %a, ptr nocapture readonly %b, i32 %N) {
 entry:
   %brmerge.demorgan = and i1 %t1, %t2
   %cmp6 = icmp ne i32 %N, 0
@@ -30,12 +30,12 @@ entry:
 
 while.body:                                       ; preds = %while.body, %entry
   %i.09 = phi i32 [ %inc, %while.body ], [ 0, %entry ]
-  %a.addr.08 = phi i32* [ %incdec.ptr3, %while.body ], [ %a, %entry ]
-  %b.addr.07 = phi i32* [ %incdec.ptr, %while.body ], [ %b, %entry ]
-  %incdec.ptr = getelementptr inbounds i32, i32* %b.addr.07, i32 1
-  %tmp = load i32, i32* %b.addr.07, align 4
-  %incdec.ptr3 = getelementptr inbounds i32, i32* %a.addr.08, i32 1
-  store i32 %tmp, i32* %a.addr.08, align 4
+  %a.addr.08 = phi ptr [ %incdec.ptr3, %while.body ], [ %a, %entry ]
+  %b.addr.07 = phi ptr [ %incdec.ptr, %while.body ], [ %b, %entry ]
+  %incdec.ptr = getelementptr inbounds i32, ptr %b.addr.07, i32 1
+  %tmp = load i32, ptr %b.addr.07, align 4
+  %incdec.ptr3 = getelementptr inbounds i32, ptr %a.addr.08, i32 1
+  store i32 %tmp, ptr %a.addr.08, align 4
   %inc = add nuw i32 %i.09, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %if.end, label %while.body
@@ -54,7 +54,7 @@ if.end:                                           ; preds = %while.body, %entry
 ; CHECK-NOT:   $lr = t2DLS killed renamable $lr
 ; CHECK: bb.3.while.body:
 ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.3
-define void @ne_preheader(i1 zeroext %t1, i1 zeroext %t2, i32* nocapture %a, i32* nocapture readonly %b, i32 %N) {
+define void @ne_preheader(i1 zeroext %t1, i1 zeroext %t2, ptr nocapture %a, ptr nocapture readonly %b, i32 %N) {
 entry:
   %brmerge.demorgan = and i1 %t1, %t2
   br i1 %brmerge.demorgan, label %while.preheader, label %if.end
@@ -65,12 +65,12 @@ while.preheader:                                  ; preds = %entry
 
 while.body:                                       ; preds = %while.body, %while.preheader
   %i.09 = phi i32 [ %inc, %while.body ], [ 0, %while.preheader ]
-  %a.addr.08 = phi i32* [ %incdec.ptr3, %while.body ], [ %a, %while.preheader ]
-  %b.addr.07 = phi i32* [ %incdec.ptr, %while.body ], [ %b, %while.preheader ]
-  %incdec.ptr = getelementptr inbounds i32, i32* %b.addr.07, i32 1
-  %tmp = load i32, i32* %b.addr.07, align 4
-  %incdec.ptr3 = getelementptr inbounds i32, i32* %a.addr.08, i32 1
-  store i32 %tmp, i32* %a.addr.08, align 4
+  %a.addr.08 = phi ptr [ %incdec.ptr3, %while.body ], [ %a, %while.preheader ]
+  %b.addr.07 = phi ptr [ %incdec.ptr, %while.body ], [ %b, %while.preheader ]
+  %incdec.ptr = getelementptr inbounds i32, ptr %b.addr.07, i32 1
+  %tmp = load i32, ptr %b.addr.07, align 4
+  %incdec.ptr3 = getelementptr inbounds i32, ptr %a.addr.08, i32 1
+  store i32 %tmp, ptr %a.addr.08, align 4
   %inc = add nuw i32 %i.09, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %if.end, label %while.body
@@ -89,7 +89,7 @@ if.end:                                           ; preds = %while.body, %while.
 ; CHECK-NOT:   $lr = t2DLS killed renamable $lr
 ; CHECK: bb.3.while.body:
 ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.3
-define void @eq_preheader(i1 zeroext %t1, i1 zeroext %t2, i32* nocapture %a, i32* nocapture readonly %b, i32 %N) {
+define void @eq_preheader(i1 zeroext %t1, i1 zeroext %t2, ptr nocapture %a, ptr nocapture readonly %b, i32 %N) {
 entry:
   %brmerge.demorgan = and i1 %t1, %t2
   br i1 %brmerge.demorgan, label %while.preheader, label %if.end
@@ -100,12 +100,12 @@ while.preheader:                                  ; preds = %entry
 
 while.body:                                       ; preds = %while.body, %while.preheader
   %i.09 = phi i32 [ %inc, %while.body ], [ 0, %while.preheader ]
-  %a.addr.08 = phi i32* [ %incdec.ptr3, %while.body ], [ %a, %while.preheader ]
-  %b.addr.07 = phi i32* [ %incdec.ptr, %while.body ], [ %b, %while.preheader ]
-  %incdec.ptr = getelementptr inbounds i32, i32* %b.addr.07, i32 1
-  %tmp = load i32, i32* %b.addr.07, align 4
-  %incdec.ptr3 = getelementptr inbounds i32, i32* %a.addr.08, i32 1
-  store i32 %tmp, i32* %a.addr.08, align 4
+  %a.addr.08 = phi ptr [ %incdec.ptr3, %while.body ], [ %a, %while.preheader ]
+  %b.addr.07 = phi ptr [ %incdec.ptr, %while.body ], [ %b, %while.preheader ]
+  %incdec.ptr = getelementptr inbounds i32, ptr %b.addr.07, i32 1
+  %tmp = load i32, ptr %b.addr.07, align 4
+  %incdec.ptr3 = getelementptr inbounds i32, ptr %a.addr.08, i32 1
+  store i32 %tmp, ptr %a.addr.08, align 4
   %inc = add nuw i32 %i.09, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %if.end, label %while.body
@@ -124,7 +124,7 @@ if.end:                                           ; preds = %while.body, %while.
 ; CHECK-NOT:   $lr = t2DLS killed renamable $lr
 ; CHECK: bb.3.while.body:
 ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.3
-define void @ne_prepreheader(i1 zeroext %t1, i1 zeroext %t2, i32* nocapture %a, i32* nocapture readonly %b, i32 %N) {
+define void @ne_prepreheader(i1 zeroext %t1, i1 zeroext %t2, ptr nocapture %a, ptr nocapture readonly %b, i32 %N) {
 entry:
   %cmp = icmp ne i32 %N, 0
   br i1 %cmp, label %while.preheader, label %if.end
@@ -135,12 +135,12 @@ while.preheader:                                  ; preds = %entry
 
 while.body:                                       ; preds = %while.body, %while.preheader
   %i.09 = phi i32 [ %inc, %while.body ], [ 0, %while.preheader ]
-  %a.addr.08 = phi i32* [ %incdec.ptr3, %while.body ], [ %a, %while.preheader ]
-  %b.addr.07 = phi i32* [ %incdec.ptr, %while.body ], [ %b, %while.preheader ]
-  %incdec.ptr = getelementptr inbounds i32, i32* %b.addr.07, i32 1
-  %tmp = load i32, i32* %b.addr.07, align 4
-  %incdec.ptr3 = getelementptr inbounds i32, i32* %a.addr.08, i32 1
-  store i32 %tmp, i32* %a.addr.08, align 4
+  %a.addr.08 = phi ptr [ %incdec.ptr3, %while.body ], [ %a, %while.preheader ]
+  %b.addr.07 = phi ptr [ %incdec.ptr, %while.body ], [ %b, %while.preheader ]
+  %incdec.ptr = getelementptr inbounds i32, ptr %b.addr.07, i32 1
+  %tmp = load i32, ptr %b.addr.07, align 4
+  %incdec.ptr3 = getelementptr inbounds i32, ptr %a.addr.08, i32 1
+  store i32 %tmp, ptr %a.addr.08, align 4
   %inc = add nuw i32 %i.09, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %if.end, label %while.body
@@ -155,7 +155,7 @@ if.end:                                           ; preds = %while.body, %while.
 ; CHECK:   $lr =
 ; CHECK: bb.2.do.body:
 ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.2
-define void @be_ne(i32* nocapture %a, i32* nocapture readonly %b, i32 %N) {
+define void @be_ne(ptr nocapture %a, ptr nocapture readonly %b, i32 %N) {
 entry:
   %cmp = icmp ne i32 %N, 0
   %sub = sub i32 %N, 1
@@ -164,13 +164,13 @@ entry:
   br i1 %cmp.1, label %do.body, label %if.end
 
 do.body:                                          ; preds = %do.body, %entry
-  %b.addr.0 = phi i32* [ %incdec.ptr, %do.body ], [ %b, %entry ]
-  %a.addr.0 = phi i32* [ %incdec.ptr3, %do.body ], [ %a, %entry ]
+  %b.addr.0 = phi ptr [ %incdec.ptr, %do.body ], [ %b, %entry ]
+  %a.addr.0 = phi ptr [ %incdec.ptr3, %do.body ], [ %a, %entry ]
   %i.0 = phi i32 [ %inc, %do.body ], [ 0, %entry ]
-  %incdec.ptr = getelementptr inbounds i32, i32* %b.addr.0, i32 1
-  %tmp = load i32, i32* %b.addr.0, align 4
-  %incdec.ptr3 = getelementptr inbounds i32, i32* %a.addr.0, i32 1
-  store i32 %tmp, i32* %a.addr.0, align 4
+  %incdec.ptr = getelementptr inbounds i32, ptr %b.addr.0, i32 1
+  %tmp = load i32, ptr %b.addr.0, align 4
+  %incdec.ptr3 = getelementptr inbounds i32, ptr %a.addr.0, i32 1
+  store i32 %tmp, ptr %a.addr.0, align 4
   %inc = add nuw i32 %i.0, 1
   %cmp.2 = icmp ult i32 %inc, %N
   br i1 %cmp.2, label %do.body, label %if.end
@@ -186,7 +186,7 @@ if.end:                                           ; preds = %do.body, %entry
 ; CHECK: bb.1.do.body.preheader:
 ; CHECK: bb.2.do.body:
 ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.2
-define void @ne_trip_count(i1 zeroext %t1, i32* nocapture %a, i32* nocapture readonly %b, i32 %N) {
+define void @ne_trip_count(i1 zeroext %t1, ptr nocapture %a, ptr nocapture readonly %b, i32 %N) {
 entry:
   br label %do.body.preheader
 
@@ -195,13 +195,13 @@ do.body.preheader:
   br i1 %cmp, label %do.body, label %if.end
 
 do.body:
-  %b.addr.0 = phi i32* [ %incdec.ptr, %do.body ], [ %b, %do.body.preheader ]
-  %a.addr.0 = phi i32* [ %incdec.ptr3, %do.body ], [ %a, %do.body.preheader ]
+  %b.addr.0 = phi ptr [ %incdec.ptr, %do.body ], [ %b, %do.body.preheader ]
+  %a.addr.0 = phi ptr [ %incdec.ptr3, %do.body ], [ %a, %do.body.preheader ]
   %i.0 = phi i32 [ %inc, %do.body ], [ 0, %do.body.preheader ]
-  %incdec.ptr = getelementptr inbounds i32, i32* %b.addr.0, i32 1
-  %tmp = load i32, i32* %b.addr.0, align 4
-  %incdec.ptr3 = getelementptr inbounds i32, i32* %a.addr.0, i32 1
-  store i32 %tmp, i32* %a.addr.0, align 4
+  %incdec.ptr = getelementptr inbounds i32, ptr %b.addr.0, i32 1
+  %tmp = load i32, ptr %b.addr.0, align 4
+  %incdec.ptr3 = getelementptr inbounds i32, ptr %a.addr.0, i32 1
+  store i32 %tmp, ptr %a.addr.0, align 4
   %inc = add nuw i32 %i.0, 1
   %cmp.1 = icmp ult i32 %inc, %N
   br i1 %cmp.1, label %do.body, label %if.end

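The loop-guards tests above apply the same rewrite to pointer-typed phis:
a phi of i32* becomes a phi of plain ptr, carrying no pointee type at all.
A minimal sketch of that pattern, with a hypothetical @copy_n (the loop
assumes %n >= 1, matching the do-while shape of the tests):

  define void @copy_n(ptr %dst, ptr %src, i32 %n) {
  entry:
    br label %loop

  loop:
    %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
    %d = phi ptr [ %dst, %entry ], [ %d.next, %loop ]   ; was: phi i32*
    %s = phi ptr [ %src, %entry ], [ %s.next, %loop ]   ; was: phi i32*
    %v = load i32, ptr %s, align 4
    store i32 %v, ptr %d, align 4
    %d.next = getelementptr inbounds i32, ptr %d, i32 1
    %s.next = getelementptr inbounds i32, ptr %s, i32 1
    %i.next = add nuw i32 %i, 1
    %done = icmp eq i32 %i.next, %n
    br i1 %done, label %exit, label %loop

  exit:
    ret void
  }
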
diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lsr-profitable-chain.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lsr-profitable-chain.ll
index dfb5c79404d00..6d9dc46e1caa0 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lsr-profitable-chain.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lsr-profitable-chain.ll
@@ -7,7 +7,7 @@ target triple = "thumbv8.1m-arm-none-eabi"
 ; Tests that LSR will not interfere with the VCTP intrinsic,
 ; and that this loop will correctly become tail-predicated.
 
-define arm_aapcs_vfpcc float @vctpi32(float* %0, i32 %1) {
+define arm_aapcs_vfpcc float @vctpi32(ptr %0, i32 %1) {
 ; CHECK-LABEL: vctpi32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    push {r4, lr}
@@ -34,7 +34,7 @@ define arm_aapcs_vfpcc float @vctpi32(float* %0, i32 %1) {
   %3 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 0, i32 8)
   %4 = extractvalue { <4 x i32>, i32 } %3, 0
   %5 = add nsw i32 %1, -1
-  %6 = ptrtoint float* %0 to i32
+  %6 = ptrtoint ptr %0 to i32
   %7 = insertelement <4 x i32> undef, i32 %6, i32 0
   %8 = add <4 x i32> %7, <i32 -32, i32 undef, i32 undef, i32 undef>
   %9 = shufflevector <4 x i32> %8, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -55,7 +55,7 @@ define arm_aapcs_vfpcc float @vctpi32(float* %0, i32 %1) {
   br i1 %21, label %11, label %22
 
 22:
-  %23 = tail call arm_aapcs_vfpcc i32 bitcast (i32 (...)* @vecAddAcrossF32Mve to i32 (<4 x float>)*)(<4 x float> %19)
+  %23 = tail call arm_aapcs_vfpcc i32 @vecAddAcrossF32Mve(<4 x float> %19)
   %24 = sitofp i32 %23 to float
   %25 = tail call float @llvm.fabs.f32(float %24)
   ret float %25

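One change in lsr-profitable-chain.ll above goes beyond renaming: with
typed pointers, calling @vecAddAcrossF32Mve through its i32 (...)
declaration at a different signature required a constant-expression
bitcast at the call site; with opaque pointers every function pointer is
just ptr, so the call is written directly at the desired type and the
bitcast vanishes. In sketch form, with a hypothetical @callee:

  declare i32 @callee(...)

  define i32 @caller(float %x) {
    ; before: %r = call i32 bitcast (i32 (...)* @callee to i32 (float)*)(float %x)
    %r = call i32 @callee(float %x)   ; after: direct call at type i32 (float)
    ret i32 %r
  }
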
diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll
index 1b450f84ac249..44cbd7d65125e 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
 
-define void @arm_min_q31(i32* nocapture readonly %pSrc, i32 %blockSize, i32* nocapture %pResult, i32* nocapture %pIndex) {
+define void @arm_min_q31(ptr nocapture readonly %pSrc, i32 %blockSize, ptr nocapture %pResult, ptr nocapture %pIndex) {
 ; CHECK-LABEL: arm_min_q31:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -79,7 +79,7 @@ define void @arm_min_q31(i32* nocapture readonly %pSrc, i32 %blockSize, i32* noc
 ; CHECK-NEXT:    str r6, [r3]
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 entry:
-  %0 = load i32, i32* %pSrc, align 4
+  %0 = load i32, ptr %pSrc, align 4
   %blkCnt.015 = add i32 %blockSize, -1
   %cmp.not17 = icmp eq i32 %blkCnt.015, 0
   br i1 %cmp.not17, label %while.end, label %while.body.preheader
@@ -95,34 +95,34 @@ while.body.preheader.new:                         ; preds = %while.body.preheade
   br label %while.body
 
 while.body:                                       ; preds = %while.body, %while.body.preheader.new
-  %pSrc.addr.021.pn = phi i32* [ %pSrc, %while.body.preheader.new ], [ %pSrc.addr.021.3, %while.body ]
+  %pSrc.addr.021.pn = phi ptr [ %pSrc, %while.body.preheader.new ], [ %pSrc.addr.021.3, %while.body ]
   %blkCnt.020 = phi i32 [ %blkCnt.015, %while.body.preheader.new ], [ %blkCnt.0.3, %while.body ]
   %outIndex.019 = phi i32 [ 0, %while.body.preheader.new ], [ %spec.select14.3, %while.body ]
   %out.018 = phi i32 [ %0, %while.body.preheader.new ], [ %spec.select.3, %while.body ]
   %niter = phi i32 [ %unroll_iter, %while.body.preheader.new ], [ %niter.nsub.3, %while.body ]
-  %pSrc.addr.021 = getelementptr inbounds i32, i32* %pSrc.addr.021.pn, i32 1
-  %3 = load i32, i32* %pSrc.addr.021, align 4
+  %pSrc.addr.021 = getelementptr inbounds i32, ptr %pSrc.addr.021.pn, i32 1
+  %3 = load i32, ptr %pSrc.addr.021, align 4
   %cmp2 = icmp sgt i32 %out.018, %3
   %sub3 = sub i32 %blockSize, %blkCnt.020
   %spec.select = select i1 %cmp2, i32 %3, i32 %out.018
   %spec.select14 = select i1 %cmp2, i32 %sub3, i32 %outIndex.019
   %blkCnt.0 = add i32 %blkCnt.020, -1
-  %pSrc.addr.021.1 = getelementptr inbounds i32, i32* %pSrc.addr.021.pn, i32 2
-  %4 = load i32, i32* %pSrc.addr.021.1, align 4
+  %pSrc.addr.021.1 = getelementptr inbounds i32, ptr %pSrc.addr.021.pn, i32 2
+  %4 = load i32, ptr %pSrc.addr.021.1, align 4
   %cmp2.1 = icmp sgt i32 %spec.select, %4
   %sub3.1 = sub i32 %blockSize, %blkCnt.0
   %spec.select.1 = select i1 %cmp2.1, i32 %4, i32 %spec.select
   %spec.select14.1 = select i1 %cmp2.1, i32 %sub3.1, i32 %spec.select14
   %blkCnt.0.1 = add i32 %blkCnt.020, -2
-  %pSrc.addr.021.2 = getelementptr inbounds i32, i32* %pSrc.addr.021.pn, i32 3
-  %5 = load i32, i32* %pSrc.addr.021.2, align 4
+  %pSrc.addr.021.2 = getelementptr inbounds i32, ptr %pSrc.addr.021.pn, i32 3
+  %5 = load i32, ptr %pSrc.addr.021.2, align 4
   %cmp2.2 = icmp sgt i32 %spec.select.1, %5
   %sub3.2 = sub i32 %blockSize, %blkCnt.0.1
   %spec.select.2 = select i1 %cmp2.2, i32 %5, i32 %spec.select.1
   %spec.select14.2 = select i1 %cmp2.2, i32 %sub3.2, i32 %spec.select14.1
   %blkCnt.0.2 = add i32 %blkCnt.020, -3
-  %pSrc.addr.021.3 = getelementptr inbounds i32, i32* %pSrc.addr.021.pn, i32 4
-  %6 = load i32, i32* %pSrc.addr.021.3, align 4
+  %pSrc.addr.021.3 = getelementptr inbounds i32, ptr %pSrc.addr.021.pn, i32 4
+  %6 = load i32, ptr %pSrc.addr.021.3, align 4
   %cmp2.3 = icmp sgt i32 %spec.select.2, %6
   %sub3.3 = sub i32 %blockSize, %blkCnt.0.2
   %spec.select.3 = select i1 %cmp2.3, i32 %6, i32 %spec.select.2
@@ -135,7 +135,7 @@ while.body:                                       ; preds = %while.body, %while.
 while.end.loopexit.unr-lcssa:                     ; preds = %while.body, %while.body.preheader
   %spec.select.lcssa.ph = phi i32 [ undef, %while.body.preheader ], [ %spec.select.3, %while.body ]
   %spec.select14.lcssa.ph = phi i32 [ undef, %while.body.preheader ], [ %spec.select14.3, %while.body ]
-  %pSrc.addr.021.pn.unr = phi i32* [ %pSrc, %while.body.preheader ], [ %pSrc.addr.021.3, %while.body ]
+  %pSrc.addr.021.pn.unr = phi ptr [ %pSrc, %while.body.preheader ], [ %pSrc.addr.021.3, %while.body ]
   %blkCnt.020.unr = phi i32 [ %blkCnt.015, %while.body.preheader ], [ %blkCnt.0.3, %while.body ]
   %outIndex.019.unr = phi i32 [ 0, %while.body.preheader ], [ %spec.select14.3, %while.body ]
   %out.018.unr = phi i32 [ %0, %while.body.preheader ], [ %spec.select.3, %while.body ]
@@ -143,8 +143,8 @@ while.end.loopexit.unr-lcssa:                     ; preds = %while.body, %while.
   br i1 %lcmp.mod.not, label %while.end, label %while.body.epil
 
 while.body.epil:                                  ; preds = %while.end.loopexit.unr-lcssa
-  %pSrc.addr.021.epil = getelementptr inbounds i32, i32* %pSrc.addr.021.pn.unr, i32 1
-  %7 = load i32, i32* %pSrc.addr.021.epil, align 4
+  %pSrc.addr.021.epil = getelementptr inbounds i32, ptr %pSrc.addr.021.pn.unr, i32 1
+  %7 = load i32, ptr %pSrc.addr.021.epil, align 4
   %cmp2.epil = icmp sgt i32 %out.018.unr, %7
   %sub3.epil = sub i32 %blockSize, %blkCnt.020.unr
   %spec.select.epil = select i1 %cmp2.epil, i32 %7, i32 %out.018.unr
@@ -155,14 +155,14 @@ while.body.epil:                                  ; preds = %while.end.loopexit.
 while.end:                                        ; preds = %while.end.loopexit.unr-lcssa, %while.body.epil.2, %while.body.epil.1, %while.body.epil, %entry
   %out.0.lcssa = phi i32 [ %0, %entry ], [ %spec.select.lcssa.ph, %while.end.loopexit.unr-lcssa ], [ %spec.select.epil, %while.body.epil ], [ %spec.select.epil.1, %while.body.epil.1 ], [ %spec.select.epil.2, %while.body.epil.2 ]
   %outIndex.0.lcssa = phi i32 [ 0, %entry ], [ %spec.select14.lcssa.ph, %while.end.loopexit.unr-lcssa ], [ %spec.select14.epil, %while.body.epil ], [ %spec.select14.epil.1, %while.body.epil.1 ], [ %spec.select14.epil.2, %while.body.epil.2 ]
-  store i32 %out.0.lcssa, i32* %pResult, align 4
-  store i32 %outIndex.0.lcssa, i32* %pIndex, align 4
+  store i32 %out.0.lcssa, ptr %pResult, align 4
+  store i32 %outIndex.0.lcssa, ptr %pIndex, align 4
   ret void
 
 while.body.epil.1:                                ; preds = %while.body.epil
   %blkCnt.0.epil = add i32 %blkCnt.020.unr, -1
-  %pSrc.addr.021.epil.1 = getelementptr inbounds i32, i32* %pSrc.addr.021.pn.unr, i32 2
-  %8 = load i32, i32* %pSrc.addr.021.epil.1, align 4
+  %pSrc.addr.021.epil.1 = getelementptr inbounds i32, ptr %pSrc.addr.021.pn.unr, i32 2
+  %8 = load i32, ptr %pSrc.addr.021.epil.1, align 4
   %cmp2.epil.1 = icmp sgt i32 %spec.select.epil, %8
   %sub3.epil.1 = sub i32 %blockSize, %blkCnt.0.epil
   %spec.select.epil.1 = select i1 %cmp2.epil.1, i32 %8, i32 %spec.select.epil
@@ -172,8 +172,8 @@ while.body.epil.1:                                ; preds = %while.body.epil
 
 while.body.epil.2:                                ; preds = %while.body.epil.1
   %blkCnt.0.epil.1 = add i32 %blkCnt.020.unr, -2
-  %pSrc.addr.021.epil.2 = getelementptr inbounds i32, i32* %pSrc.addr.021.pn.unr, i32 3
-  %9 = load i32, i32* %pSrc.addr.021.epil.2, align 4
+  %pSrc.addr.021.epil.2 = getelementptr inbounds i32, ptr %pSrc.addr.021.pn.unr, i32 3
+  %9 = load i32, ptr %pSrc.addr.021.epil.2, align 4
   %cmp2.epil.2 = icmp sgt i32 %spec.select.epil.1, %9
   %sub3.epil.2 = sub i32 %blockSize, %blkCnt.0.epil.1
   %spec.select.epil.2 = select i1 %cmp2.epil.2, i32 %9, i32 %spec.select.epil.1

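The mve-float-loops.ll hunks below show the other systematic cleanup: a
typed-pointer vector access needed a bitcast from float* to <4 x float>*
in front of every wide load and store, while an opaque ptr can simply be
loaded or stored at the vector type. Deleting those bitcasts removes
instructions, which is why the anonymous SSA numbers (%5, %6, ...) shift
throughout the diff. A minimal sketch with a hypothetical @scale4:

  ; before (typed pointers):
  ;   %b = bitcast float* %p to <4 x float>*
  ;   %v = load <4 x float>, <4 x float>* %b, align 4
  define void @scale4(ptr %p, float %k) {
    %v = load <4 x float>, ptr %p, align 4            ; no bitcast needed
    %k0 = insertelement <4 x float> undef, float %k, i32 0
    %kv = shufflevector <4 x float> %k0, <4 x float> undef, <4 x i32> zeroinitializer
    %m = fmul <4 x float> %v, %kv
    store <4 x float> %m, ptr %p, align 4
    ret void
  }
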
diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll
index 03bc01822b376..7a279eab36d9d 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp,+fp-armv8d16sp,+fp16,+fullfp16 %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc void @float_float_mul(float* nocapture readonly %a, float* nocapture readonly %b, float* nocapture %c, i32 %N) {
+define arm_aapcs_vfpcc void @float_float_mul(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture %c, i32 %N) {
 ; CHECK-LABEL: float_float_mul:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
@@ -119,13 +119,13 @@ for.body.preheader22:                             ; preds = %middle.block, %vect
 for.body.prol:                                    ; preds = %for.body.preheader22, %for.body.prol
   %i.09.prol = phi i32 [ %inc.prol, %for.body.prol ], [ %i.09.ph, %for.body.preheader22 ]
   %prol.iter = phi i32 [ %prol.iter.sub, %for.body.prol ], [ %xtraiter, %for.body.preheader22 ]
-  %arrayidx.prol = getelementptr inbounds float, float* %a, i32 %i.09.prol
-  %2 = load float, float* %arrayidx.prol, align 4
-  %arrayidx1.prol = getelementptr inbounds float, float* %b, i32 %i.09.prol
-  %3 = load float, float* %arrayidx1.prol, align 4
+  %arrayidx.prol = getelementptr inbounds float, ptr %a, i32 %i.09.prol
+  %2 = load float, ptr %arrayidx.prol, align 4
+  %arrayidx1.prol = getelementptr inbounds float, ptr %b, i32 %i.09.prol
+  %3 = load float, ptr %arrayidx1.prol, align 4
   %mul.prol = fmul float %2, %3
-  %arrayidx2.prol = getelementptr inbounds float, float* %c, i32 %i.09.prol
-  store float %mul.prol, float* %arrayidx2.prol, align 4
+  %arrayidx2.prol = getelementptr inbounds float, ptr %c, i32 %i.09.prol
+  store float %mul.prol, ptr %arrayidx2.prol, align 4
   %inc.prol = add nuw i32 %i.09.prol, 1
   %prol.iter.sub = add i32 %prol.iter, -1
   %prol.iter.cmp = icmp eq i32 %prol.iter.sub, 0
@@ -137,14 +137,14 @@ for.body.prol.loopexit:                           ; preds = %for.body.prol, %for
   br i1 %4, label %for.cond.cleanup, label %for.body
 
 vector.memcheck:                                  ; preds = %for.body.preheader
-  %scevgep = getelementptr float, float* %c, i32 %N
-  %scevgep13 = getelementptr float, float* %a, i32 %N
-  %scevgep16 = getelementptr float, float* %b, i32 %N
-  %bound0 = icmp ugt float* %scevgep13, %c
-  %bound1 = icmp ugt float* %scevgep, %a
+  %scevgep = getelementptr float, ptr %c, i32 %N
+  %scevgep13 = getelementptr float, ptr %a, i32 %N
+  %scevgep16 = getelementptr float, ptr %b, i32 %N
+  %bound0 = icmp ugt ptr %scevgep13, %c
+  %bound1 = icmp ugt ptr %scevgep, %a
   %found.conflict = and i1 %bound0, %bound1
-  %bound018 = icmp ugt float* %scevgep16, %c
-  %bound119 = icmp ugt float* %scevgep, %b
+  %bound018 = icmp ugt ptr %scevgep16, %c
+  %bound119 = icmp ugt ptr %scevgep, %b
   %found.conflict20 = and i1 %bound018, %bound119
   %conflict.rdx = or i1 %found.conflict, %found.conflict20
   br i1 %conflict.rdx, label %for.body.preheader22, label %vector.ph
@@ -155,19 +155,16 @@ vector.ph:                                        ; preds = %vector.memcheck
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %5 = getelementptr inbounds float, float* %a, i32 %index
-  %6 = bitcast float* %5 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %6, align 4
-  %7 = getelementptr inbounds float, float* %b, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  %wide.load21 = load <4 x float>, <4 x float>* %8, align 4
-  %9 = fmul <4 x float> %wide.load, %wide.load21
-  %10 = getelementptr inbounds float, float* %c, i32 %index
-  %11 = bitcast float* %10 to <4 x float>*
-  store <4 x float> %9, <4 x float>* %11, align 4
+  %5 = getelementptr inbounds float, ptr %a, i32 %index
+  %wide.load = load <4 x float>, ptr %5, align 4
+  %6 = getelementptr inbounds float, ptr %b, i32 %index
+  %wide.load21 = load <4 x float>, ptr %6, align 4
+  %7 = fmul <4 x float> %wide.load, %wide.load21
+  %8 = getelementptr inbounds float, ptr %c, i32 %index
+  store <4 x float> %7, ptr %8, align 4
   %index.next = add i32 %index, 4
-  %12 = icmp eq i32 %index.next, %n.vec
-  br i1 %12, label %middle.block, label %vector.body
+  %9 = icmp eq i32 %index.next, %n.vec
+  br i1 %9, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -178,43 +175,43 @@ for.cond.cleanup:                                 ; preds = %for.body.prol.loope
 
 for.body:                                         ; preds = %for.body.prol.loopexit, %for.body
   %i.09 = phi i32 [ %inc.3, %for.body ], [ %i.09.unr, %for.body.prol.loopexit ]
-  %arrayidx = getelementptr inbounds float, float* %a, i32 %i.09
-  %13 = load float, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %b, i32 %i.09
-  %14 = load float, float* %arrayidx1, align 4
-  %mul = fmul float %13, %14
-  %arrayidx2 = getelementptr inbounds float, float* %c, i32 %i.09
-  store float %mul, float* %arrayidx2, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i32 %i.09
+  %10 = load float, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %b, i32 %i.09
+  %11 = load float, ptr %arrayidx1, align 4
+  %mul = fmul float %10, %11
+  %arrayidx2 = getelementptr inbounds float, ptr %c, i32 %i.09
+  store float %mul, ptr %arrayidx2, align 4
   %inc = add nuw i32 %i.09, 1
-  %arrayidx.1 = getelementptr inbounds float, float* %a, i32 %inc
-  %15 = load float, float* %arrayidx.1, align 4
-  %arrayidx1.1 = getelementptr inbounds float, float* %b, i32 %inc
-  %16 = load float, float* %arrayidx1.1, align 4
-  %mul.1 = fmul float %15, %16
-  %arrayidx2.1 = getelementptr inbounds float, float* %c, i32 %inc
-  store float %mul.1, float* %arrayidx2.1, align 4
+  %arrayidx.1 = getelementptr inbounds float, ptr %a, i32 %inc
+  %12 = load float, ptr %arrayidx.1, align 4
+  %arrayidx1.1 = getelementptr inbounds float, ptr %b, i32 %inc
+  %13 = load float, ptr %arrayidx1.1, align 4
+  %mul.1 = fmul float %12, %13
+  %arrayidx2.1 = getelementptr inbounds float, ptr %c, i32 %inc
+  store float %mul.1, ptr %arrayidx2.1, align 4
   %inc.1 = add nuw i32 %i.09, 2
-  %arrayidx.2 = getelementptr inbounds float, float* %a, i32 %inc.1
-  %17 = load float, float* %arrayidx.2, align 4
-  %arrayidx1.2 = getelementptr inbounds float, float* %b, i32 %inc.1
-  %18 = load float, float* %arrayidx1.2, align 4
-  %mul.2 = fmul float %17, %18
-  %arrayidx2.2 = getelementptr inbounds float, float* %c, i32 %inc.1
-  store float %mul.2, float* %arrayidx2.2, align 4
+  %arrayidx.2 = getelementptr inbounds float, ptr %a, i32 %inc.1
+  %14 = load float, ptr %arrayidx.2, align 4
+  %arrayidx1.2 = getelementptr inbounds float, ptr %b, i32 %inc.1
+  %15 = load float, ptr %arrayidx1.2, align 4
+  %mul.2 = fmul float %14, %15
+  %arrayidx2.2 = getelementptr inbounds float, ptr %c, i32 %inc.1
+  store float %mul.2, ptr %arrayidx2.2, align 4
   %inc.2 = add nuw i32 %i.09, 3
-  %arrayidx.3 = getelementptr inbounds float, float* %a, i32 %inc.2
-  %19 = load float, float* %arrayidx.3, align 4
-  %arrayidx1.3 = getelementptr inbounds float, float* %b, i32 %inc.2
-  %20 = load float, float* %arrayidx1.3, align 4
-  %mul.3 = fmul float %19, %20
-  %arrayidx2.3 = getelementptr inbounds float, float* %c, i32 %inc.2
-  store float %mul.3, float* %arrayidx2.3, align 4
+  %arrayidx.3 = getelementptr inbounds float, ptr %a, i32 %inc.2
+  %16 = load float, ptr %arrayidx.3, align 4
+  %arrayidx1.3 = getelementptr inbounds float, ptr %b, i32 %inc.2
+  %17 = load float, ptr %arrayidx1.3, align 4
+  %mul.3 = fmul float %16, %17
+  %arrayidx2.3 = getelementptr inbounds float, ptr %c, i32 %inc.2
+  store float %mul.3, ptr %arrayidx2.3, align 4
   %inc.3 = add nuw i32 %i.09, 4
   %exitcond.3 = icmp eq i32 %inc.3, %N
   br i1 %exitcond.3, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @float_float_add(float* nocapture readonly %a, float* nocapture readonly %b, float* nocapture %c, i32 %N) {
+define arm_aapcs_vfpcc void @float_float_add(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture %c, i32 %N) {
 ; CHECK-LABEL: float_float_add:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
@@ -332,13 +329,13 @@ for.body.preheader22:                             ; preds = %middle.block, %vect
 for.body.prol:                                    ; preds = %for.body.preheader22, %for.body.prol
   %i.09.prol = phi i32 [ %inc.prol, %for.body.prol ], [ %i.09.ph, %for.body.preheader22 ]
   %prol.iter = phi i32 [ %prol.iter.sub, %for.body.prol ], [ %xtraiter, %for.body.preheader22 ]
-  %arrayidx.prol = getelementptr inbounds float, float* %a, i32 %i.09.prol
-  %2 = load float, float* %arrayidx.prol, align 4
-  %arrayidx1.prol = getelementptr inbounds float, float* %b, i32 %i.09.prol
-  %3 = load float, float* %arrayidx1.prol, align 4
+  %arrayidx.prol = getelementptr inbounds float, ptr %a, i32 %i.09.prol
+  %2 = load float, ptr %arrayidx.prol, align 4
+  %arrayidx1.prol = getelementptr inbounds float, ptr %b, i32 %i.09.prol
+  %3 = load float, ptr %arrayidx1.prol, align 4
   %add.prol = fadd float %2, %3
-  %arrayidx2.prol = getelementptr inbounds float, float* %c, i32 %i.09.prol
-  store float %add.prol, float* %arrayidx2.prol, align 4
+  %arrayidx2.prol = getelementptr inbounds float, ptr %c, i32 %i.09.prol
+  store float %add.prol, ptr %arrayidx2.prol, align 4
   %inc.prol = add nuw i32 %i.09.prol, 1
   %prol.iter.sub = add i32 %prol.iter, -1
   %prol.iter.cmp = icmp eq i32 %prol.iter.sub, 0
@@ -350,14 +347,14 @@ for.body.prol.loopexit:                           ; preds = %for.body.prol, %for
   br i1 %4, label %for.cond.cleanup, label %for.body
 
 vector.memcheck:                                  ; preds = %for.body.preheader
-  %scevgep = getelementptr float, float* %c, i32 %N
-  %scevgep13 = getelementptr float, float* %a, i32 %N
-  %scevgep16 = getelementptr float, float* %b, i32 %N
-  %bound0 = icmp ugt float* %scevgep13, %c
-  %bound1 = icmp ugt float* %scevgep, %a
+  %scevgep = getelementptr float, ptr %c, i32 %N
+  %scevgep13 = getelementptr float, ptr %a, i32 %N
+  %scevgep16 = getelementptr float, ptr %b, i32 %N
+  %bound0 = icmp ugt ptr %scevgep13, %c
+  %bound1 = icmp ugt ptr %scevgep, %a
   %found.conflict = and i1 %bound0, %bound1
-  %bound018 = icmp ugt float* %scevgep16, %c
-  %bound119 = icmp ugt float* %scevgep, %b
+  %bound018 = icmp ugt ptr %scevgep16, %c
+  %bound119 = icmp ugt ptr %scevgep, %b
   %found.conflict20 = and i1 %bound018, %bound119
   %conflict.rdx = or i1 %found.conflict, %found.conflict20
   br i1 %conflict.rdx, label %for.body.preheader22, label %vector.ph
@@ -368,19 +365,16 @@ vector.ph:                                        ; preds = %vector.memcheck
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %5 = getelementptr inbounds float, float* %a, i32 %index
-  %6 = bitcast float* %5 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %6, align 4
-  %7 = getelementptr inbounds float, float* %b, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  %wide.load21 = load <4 x float>, <4 x float>* %8, align 4
-  %9 = fadd <4 x float> %wide.load, %wide.load21
-  %10 = getelementptr inbounds float, float* %c, i32 %index
-  %11 = bitcast float* %10 to <4 x float>*
-  store <4 x float> %9, <4 x float>* %11, align 4
+  %5 = getelementptr inbounds float, ptr %a, i32 %index
+  %wide.load = load <4 x float>, ptr %5, align 4
+  %6 = getelementptr inbounds float, ptr %b, i32 %index
+  %wide.load21 = load <4 x float>, ptr %6, align 4
+  %7 = fadd <4 x float> %wide.load, %wide.load21
+  %8 = getelementptr inbounds float, ptr %c, i32 %index
+  store <4 x float> %7, ptr %8, align 4
   %index.next = add i32 %index, 4
-  %12 = icmp eq i32 %index.next, %n.vec
-  br i1 %12, label %middle.block, label %vector.body
+  %9 = icmp eq i32 %index.next, %n.vec
+  br i1 %9, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -391,43 +385,43 @@ for.cond.cleanup:                                 ; preds = %for.body.prol.loope
 
 for.body:                                         ; preds = %for.body.prol.loopexit, %for.body
   %i.09 = phi i32 [ %inc.3, %for.body ], [ %i.09.unr, %for.body.prol.loopexit ]
-  %arrayidx = getelementptr inbounds float, float* %a, i32 %i.09
-  %13 = load float, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %b, i32 %i.09
-  %14 = load float, float* %arrayidx1, align 4
-  %add = fadd float %13, %14
-  %arrayidx2 = getelementptr inbounds float, float* %c, i32 %i.09
-  store float %add, float* %arrayidx2, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i32 %i.09
+  %10 = load float, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %b, i32 %i.09
+  %11 = load float, ptr %arrayidx1, align 4
+  %add = fadd float %10, %11
+  %arrayidx2 = getelementptr inbounds float, ptr %c, i32 %i.09
+  store float %add, ptr %arrayidx2, align 4
   %inc = add nuw i32 %i.09, 1
-  %arrayidx.1 = getelementptr inbounds float, float* %a, i32 %inc
-  %15 = load float, float* %arrayidx.1, align 4
-  %arrayidx1.1 = getelementptr inbounds float, float* %b, i32 %inc
-  %16 = load float, float* %arrayidx1.1, align 4
-  %add.1 = fadd float %15, %16
-  %arrayidx2.1 = getelementptr inbounds float, float* %c, i32 %inc
-  store float %add.1, float* %arrayidx2.1, align 4
+  %arrayidx.1 = getelementptr inbounds float, ptr %a, i32 %inc
+  %12 = load float, ptr %arrayidx.1, align 4
+  %arrayidx1.1 = getelementptr inbounds float, ptr %b, i32 %inc
+  %13 = load float, ptr %arrayidx1.1, align 4
+  %add.1 = fadd float %12, %13
+  %arrayidx2.1 = getelementptr inbounds float, ptr %c, i32 %inc
+  store float %add.1, ptr %arrayidx2.1, align 4
   %inc.1 = add nuw i32 %i.09, 2
-  %arrayidx.2 = getelementptr inbounds float, float* %a, i32 %inc.1
-  %17 = load float, float* %arrayidx.2, align 4
-  %arrayidx1.2 = getelementptr inbounds float, float* %b, i32 %inc.1
-  %18 = load float, float* %arrayidx1.2, align 4
-  %add.2 = fadd float %17, %18
-  %arrayidx2.2 = getelementptr inbounds float, float* %c, i32 %inc.1
-  store float %add.2, float* %arrayidx2.2, align 4
+  %arrayidx.2 = getelementptr inbounds float, ptr %a, i32 %inc.1
+  %14 = load float, ptr %arrayidx.2, align 4
+  %arrayidx1.2 = getelementptr inbounds float, ptr %b, i32 %inc.1
+  %15 = load float, ptr %arrayidx1.2, align 4
+  %add.2 = fadd float %14, %15
+  %arrayidx2.2 = getelementptr inbounds float, ptr %c, i32 %inc.1
+  store float %add.2, ptr %arrayidx2.2, align 4
   %inc.2 = add nuw i32 %i.09, 3
-  %arrayidx.3 = getelementptr inbounds float, float* %a, i32 %inc.2
-  %19 = load float, float* %arrayidx.3, align 4
-  %arrayidx1.3 = getelementptr inbounds float, float* %b, i32 %inc.2
-  %20 = load float, float* %arrayidx1.3, align 4
-  %add.3 = fadd float %19, %20
-  %arrayidx2.3 = getelementptr inbounds float, float* %c, i32 %inc.2
-  store float %add.3, float* %arrayidx2.3, align 4
+  %arrayidx.3 = getelementptr inbounds float, ptr %a, i32 %inc.2
+  %16 = load float, ptr %arrayidx.3, align 4
+  %arrayidx1.3 = getelementptr inbounds float, ptr %b, i32 %inc.2
+  %17 = load float, ptr %arrayidx1.3, align 4
+  %add.3 = fadd float %16, %17
+  %arrayidx2.3 = getelementptr inbounds float, ptr %c, i32 %inc.2
+  store float %add.3, ptr %arrayidx2.3, align 4
   %inc.3 = add nuw i32 %i.09, 4
   %exitcond.3 = icmp eq i32 %inc.3, %N
   br i1 %exitcond.3, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @float_float_sub(float* nocapture readonly %a, float* nocapture readonly %b, float* nocapture %c, i32 %N) {
+define arm_aapcs_vfpcc void @float_float_sub(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture %c, i32 %N) {
 ; CHECK-LABEL: float_float_sub:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
@@ -545,13 +539,13 @@ for.body.preheader22:                             ; preds = %middle.block, %vect
 for.body.prol:                                    ; preds = %for.body.preheader22, %for.body.prol
   %i.09.prol = phi i32 [ %inc.prol, %for.body.prol ], [ %i.09.ph, %for.body.preheader22 ]
   %prol.iter = phi i32 [ %prol.iter.sub, %for.body.prol ], [ %xtraiter, %for.body.preheader22 ]
-  %arrayidx.prol = getelementptr inbounds float, float* %a, i32 %i.09.prol
-  %2 = load float, float* %arrayidx.prol, align 4
-  %arrayidx1.prol = getelementptr inbounds float, float* %b, i32 %i.09.prol
-  %3 = load float, float* %arrayidx1.prol, align 4
+  %arrayidx.prol = getelementptr inbounds float, ptr %a, i32 %i.09.prol
+  %2 = load float, ptr %arrayidx.prol, align 4
+  %arrayidx1.prol = getelementptr inbounds float, ptr %b, i32 %i.09.prol
+  %3 = load float, ptr %arrayidx1.prol, align 4
   %sub.prol = fsub float %2, %3
-  %arrayidx2.prol = getelementptr inbounds float, float* %c, i32 %i.09.prol
-  store float %sub.prol, float* %arrayidx2.prol, align 4
+  %arrayidx2.prol = getelementptr inbounds float, ptr %c, i32 %i.09.prol
+  store float %sub.prol, ptr %arrayidx2.prol, align 4
   %inc.prol = add nuw i32 %i.09.prol, 1
   %prol.iter.sub = add i32 %prol.iter, -1
   %prol.iter.cmp = icmp eq i32 %prol.iter.sub, 0
@@ -563,14 +557,14 @@ for.body.prol.loopexit:                           ; preds = %for.body.prol, %for
   br i1 %4, label %for.cond.cleanup, label %for.body
 
 vector.memcheck:                                  ; preds = %for.body.preheader
-  %scevgep = getelementptr float, float* %c, i32 %N
-  %scevgep13 = getelementptr float, float* %a, i32 %N
-  %scevgep16 = getelementptr float, float* %b, i32 %N
-  %bound0 = icmp ugt float* %scevgep13, %c
-  %bound1 = icmp ugt float* %scevgep, %a
+  %scevgep = getelementptr float, ptr %c, i32 %N
+  %scevgep13 = getelementptr float, ptr %a, i32 %N
+  %scevgep16 = getelementptr float, ptr %b, i32 %N
+  %bound0 = icmp ugt ptr %scevgep13, %c
+  %bound1 = icmp ugt ptr %scevgep, %a
   %found.conflict = and i1 %bound0, %bound1
-  %bound018 = icmp ugt float* %scevgep16, %c
-  %bound119 = icmp ugt float* %scevgep, %b
+  %bound018 = icmp ugt ptr %scevgep16, %c
+  %bound119 = icmp ugt ptr %scevgep, %b
   %found.conflict20 = and i1 %bound018, %bound119
   %conflict.rdx = or i1 %found.conflict, %found.conflict20
   br i1 %conflict.rdx, label %for.body.preheader22, label %vector.ph
@@ -581,19 +575,16 @@ vector.ph:                                        ; preds = %vector.memcheck
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %5 = getelementptr inbounds float, float* %a, i32 %index
-  %6 = bitcast float* %5 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %6, align 4
-  %7 = getelementptr inbounds float, float* %b, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  %wide.load21 = load <4 x float>, <4 x float>* %8, align 4
-  %9 = fsub <4 x float> %wide.load, %wide.load21
-  %10 = getelementptr inbounds float, float* %c, i32 %index
-  %11 = bitcast float* %10 to <4 x float>*
-  store <4 x float> %9, <4 x float>* %11, align 4
+  %5 = getelementptr inbounds float, ptr %a, i32 %index
+  %wide.load = load <4 x float>, ptr %5, align 4
+  %6 = getelementptr inbounds float, ptr %b, i32 %index
+  %wide.load21 = load <4 x float>, ptr %6, align 4
+  %7 = fsub <4 x float> %wide.load, %wide.load21
+  %8 = getelementptr inbounds float, ptr %c, i32 %index
+  store <4 x float> %7, ptr %8, align 4
   %index.next = add i32 %index, 4
-  %12 = icmp eq i32 %index.next, %n.vec
-  br i1 %12, label %middle.block, label %vector.body
+  %9 = icmp eq i32 %index.next, %n.vec
+  br i1 %9, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -604,43 +595,43 @@ for.cond.cleanup:                                 ; preds = %for.body.prol.loope
 
 for.body:                                         ; preds = %for.body.prol.loopexit, %for.body
   %i.09 = phi i32 [ %inc.3, %for.body ], [ %i.09.unr, %for.body.prol.loopexit ]
-  %arrayidx = getelementptr inbounds float, float* %a, i32 %i.09
-  %13 = load float, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %b, i32 %i.09
-  %14 = load float, float* %arrayidx1, align 4
-  %sub = fsub float %13, %14
-  %arrayidx2 = getelementptr inbounds float, float* %c, i32 %i.09
-  store float %sub, float* %arrayidx2, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i32 %i.09
+  %10 = load float, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %b, i32 %i.09
+  %11 = load float, ptr %arrayidx1, align 4
+  %sub = fsub float %10, %11
+  %arrayidx2 = getelementptr inbounds float, ptr %c, i32 %i.09
+  store float %sub, ptr %arrayidx2, align 4
   %inc = add nuw i32 %i.09, 1
-  %arrayidx.1 = getelementptr inbounds float, float* %a, i32 %inc
-  %15 = load float, float* %arrayidx.1, align 4
-  %arrayidx1.1 = getelementptr inbounds float, float* %b, i32 %inc
-  %16 = load float, float* %arrayidx1.1, align 4
-  %sub.1 = fsub float %15, %16
-  %arrayidx2.1 = getelementptr inbounds float, float* %c, i32 %inc
-  store float %sub.1, float* %arrayidx2.1, align 4
+  %arrayidx.1 = getelementptr inbounds float, ptr %a, i32 %inc
+  %12 = load float, ptr %arrayidx.1, align 4
+  %arrayidx1.1 = getelementptr inbounds float, ptr %b, i32 %inc
+  %13 = load float, ptr %arrayidx1.1, align 4
+  %sub.1 = fsub float %12, %13
+  %arrayidx2.1 = getelementptr inbounds float, ptr %c, i32 %inc
+  store float %sub.1, ptr %arrayidx2.1, align 4
   %inc.1 = add nuw i32 %i.09, 2
-  %arrayidx.2 = getelementptr inbounds float, float* %a, i32 %inc.1
-  %17 = load float, float* %arrayidx.2, align 4
-  %arrayidx1.2 = getelementptr inbounds float, float* %b, i32 %inc.1
-  %18 = load float, float* %arrayidx1.2, align 4
-  %sub.2 = fsub float %17, %18
-  %arrayidx2.2 = getelementptr inbounds float, float* %c, i32 %inc.1
-  store float %sub.2, float* %arrayidx2.2, align 4
+  %arrayidx.2 = getelementptr inbounds float, ptr %a, i32 %inc.1
+  %14 = load float, ptr %arrayidx.2, align 4
+  %arrayidx1.2 = getelementptr inbounds float, ptr %b, i32 %inc.1
+  %15 = load float, ptr %arrayidx1.2, align 4
+  %sub.2 = fsub float %14, %15
+  %arrayidx2.2 = getelementptr inbounds float, ptr %c, i32 %inc.1
+  store float %sub.2, ptr %arrayidx2.2, align 4
   %inc.2 = add nuw i32 %i.09, 3
-  %arrayidx.3 = getelementptr inbounds float, float* %a, i32 %inc.2
-  %19 = load float, float* %arrayidx.3, align 4
-  %arrayidx1.3 = getelementptr inbounds float, float* %b, i32 %inc.2
-  %20 = load float, float* %arrayidx1.3, align 4
-  %sub.3 = fsub float %19, %20
-  %arrayidx2.3 = getelementptr inbounds float, float* %c, i32 %inc.2
-  store float %sub.3, float* %arrayidx2.3, align 4
+  %arrayidx.3 = getelementptr inbounds float, ptr %a, i32 %inc.2
+  %16 = load float, ptr %arrayidx.3, align 4
+  %arrayidx1.3 = getelementptr inbounds float, ptr %b, i32 %inc.2
+  %17 = load float, ptr %arrayidx1.3, align 4
+  %sub.3 = fsub float %16, %17
+  %arrayidx2.3 = getelementptr inbounds float, ptr %c, i32 %inc.2
+  store float %sub.3, ptr %arrayidx2.3, align 4
   %inc.3 = add nuw i32 %i.09, 4
   %exitcond.3 = icmp eq i32 %inc.3, %N
   br i1 %exitcond.3, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @float_int_mul(float* nocapture readonly %a, i32* nocapture readonly %b, float* nocapture %c, i32 %N) {
+define arm_aapcs_vfpcc void @float_int_mul(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture %c, i32 %N) {
 ; CHECK-LABEL: float_int_mul:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
@@ -755,14 +746,14 @@ for.body.preheader16:                             ; preds = %middle.block, %vect
 for.body.prol:                                    ; preds = %for.body.preheader16, %for.body.prol
   %i.09.prol = phi i32 [ %inc.prol, %for.body.prol ], [ %i.09.ph, %for.body.preheader16 ]
   %prol.iter = phi i32 [ %prol.iter.sub, %for.body.prol ], [ %xtraiter, %for.body.preheader16 ]
-  %arrayidx.prol = getelementptr inbounds float, float* %a, i32 %i.09.prol
-  %2 = load float, float* %arrayidx.prol, align 4
-  %arrayidx1.prol = getelementptr inbounds i32, i32* %b, i32 %i.09.prol
-  %3 = load i32, i32* %arrayidx1.prol, align 4
+  %arrayidx.prol = getelementptr inbounds float, ptr %a, i32 %i.09.prol
+  %2 = load float, ptr %arrayidx.prol, align 4
+  %arrayidx1.prol = getelementptr inbounds i32, ptr %b, i32 %i.09.prol
+  %3 = load i32, ptr %arrayidx1.prol, align 4
   %conv.prol = sitofp i32 %3 to float
   %mul.prol = fmul float %2, %conv.prol
-  %arrayidx2.prol = getelementptr inbounds float, float* %c, i32 %i.09.prol
-  store float %mul.prol, float* %arrayidx2.prol, align 4
+  %arrayidx2.prol = getelementptr inbounds float, ptr %c, i32 %i.09.prol
+  store float %mul.prol, ptr %arrayidx2.prol, align 4
   %inc.prol = add nuw i32 %i.09.prol, 1
   %prol.iter.sub = add i32 %prol.iter, -1
   %prol.iter.cmp = icmp eq i32 %prol.iter.sub, 0
@@ -774,10 +765,10 @@ for.body.prol.loopexit:                           ; preds = %for.body.prol, %for
   br i1 %4, label %for.cond.cleanup, label %for.body
 
 vector.memcheck:                                  ; preds = %for.body.preheader
-  %scevgep = getelementptr float, float* %c, i32 %N
-  %scevgep13 = getelementptr float, float* %a, i32 %N
-  %bound0 = icmp ugt float* %scevgep13, %c
-  %bound1 = icmp ugt float* %scevgep, %a
+  %scevgep = getelementptr float, ptr %c, i32 %N
+  %scevgep13 = getelementptr float, ptr %a, i32 %N
+  %bound0 = icmp ugt ptr %scevgep13, %c
+  %bound1 = icmp ugt ptr %scevgep, %a
   %found.conflict = and i1 %bound0, %bound1
   br i1 %found.conflict, label %for.body.preheader16, label %vector.ph
 
@@ -787,20 +778,17 @@ vector.ph:                                        ; preds = %vector.memcheck
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %5 = getelementptr inbounds float, float* %a, i32 %index
-  %6 = bitcast float* %5 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %6, align 4
-  %7 = getelementptr inbounds i32, i32* %b, i32 %index
-  %8 = bitcast i32* %7 to <4 x i32>*
-  %wide.load15 = load <4 x i32>, <4 x i32>* %8, align 4
-  %9 = sitofp <4 x i32> %wide.load15 to <4 x float>
-  %10 = fmul <4 x float> %wide.load, %9
-  %11 = getelementptr inbounds float, float* %c, i32 %index
-  %12 = bitcast float* %11 to <4 x float>*
-  store <4 x float> %10, <4 x float>* %12, align 4
+  %5 = getelementptr inbounds float, ptr %a, i32 %index
+  %wide.load = load <4 x float>, ptr %5, align 4
+  %6 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.load15 = load <4 x i32>, ptr %6, align 4
+  %7 = sitofp <4 x i32> %wide.load15 to <4 x float>
+  %8 = fmul <4 x float> %wide.load, %7
+  %9 = getelementptr inbounds float, ptr %c, i32 %index
+  store <4 x float> %8, ptr %9, align 4
   %index.next = add i32 %index, 4
-  %13 = icmp eq i32 %index.next, %n.vec
-  br i1 %13, label %middle.block, label %vector.body
+  %10 = icmp eq i32 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -811,47 +799,47 @@ for.cond.cleanup:                                 ; preds = %for.body.prol.loope
 
 for.body:                                         ; preds = %for.body.prol.loopexit, %for.body
   %i.09 = phi i32 [ %inc.3, %for.body ], [ %i.09.unr, %for.body.prol.loopexit ]
-  %arrayidx = getelementptr inbounds float, float* %a, i32 %i.09
-  %14 = load float, float* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds i32, i32* %b, i32 %i.09
-  %15 = load i32, i32* %arrayidx1, align 4
-  %conv = sitofp i32 %15 to float
-  %mul = fmul float %14, %conv
-  %arrayidx2 = getelementptr inbounds float, float* %c, i32 %i.09
-  store float %mul, float* %arrayidx2, align 4
+  %arrayidx = getelementptr inbounds float, ptr %a, i32 %i.09
+  %11 = load float, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %b, i32 %i.09
+  %12 = load i32, ptr %arrayidx1, align 4
+  %conv = sitofp i32 %12 to float
+  %mul = fmul float %11, %conv
+  %arrayidx2 = getelementptr inbounds float, ptr %c, i32 %i.09
+  store float %mul, ptr %arrayidx2, align 4
   %inc = add nuw i32 %i.09, 1
-  %arrayidx.1 = getelementptr inbounds float, float* %a, i32 %inc
-  %16 = load float, float* %arrayidx.1, align 4
-  %arrayidx1.1 = getelementptr inbounds i32, i32* %b, i32 %inc
-  %17 = load i32, i32* %arrayidx1.1, align 4
-  %conv.1 = sitofp i32 %17 to float
-  %mul.1 = fmul float %16, %conv.1
-  %arrayidx2.1 = getelementptr inbounds float, float* %c, i32 %inc
-  store float %mul.1, float* %arrayidx2.1, align 4
+  %arrayidx.1 = getelementptr inbounds float, ptr %a, i32 %inc
+  %13 = load float, ptr %arrayidx.1, align 4
+  %arrayidx1.1 = getelementptr inbounds i32, ptr %b, i32 %inc
+  %14 = load i32, ptr %arrayidx1.1, align 4
+  %conv.1 = sitofp i32 %14 to float
+  %mul.1 = fmul float %13, %conv.1
+  %arrayidx2.1 = getelementptr inbounds float, ptr %c, i32 %inc
+  store float %mul.1, ptr %arrayidx2.1, align 4
   %inc.1 = add nuw i32 %i.09, 2
-  %arrayidx.2 = getelementptr inbounds float, float* %a, i32 %inc.1
-  %18 = load float, float* %arrayidx.2, align 4
-  %arrayidx1.2 = getelementptr inbounds i32, i32* %b, i32 %inc.1
-  %19 = load i32, i32* %arrayidx1.2, align 4
-  %conv.2 = sitofp i32 %19 to float
-  %mul.2 = fmul float %18, %conv.2
-  %arrayidx2.2 = getelementptr inbounds float, float* %c, i32 %inc.1
-  store float %mul.2, float* %arrayidx2.2, align 4
+  %arrayidx.2 = getelementptr inbounds float, ptr %a, i32 %inc.1
+  %15 = load float, ptr %arrayidx.2, align 4
+  %arrayidx1.2 = getelementptr inbounds i32, ptr %b, i32 %inc.1
+  %16 = load i32, ptr %arrayidx1.2, align 4
+  %conv.2 = sitofp i32 %16 to float
+  %mul.2 = fmul float %15, %conv.2
+  %arrayidx2.2 = getelementptr inbounds float, ptr %c, i32 %inc.1
+  store float %mul.2, ptr %arrayidx2.2, align 4
   %inc.2 = add nuw i32 %i.09, 3
-  %arrayidx.3 = getelementptr inbounds float, float* %a, i32 %inc.2
-  %20 = load float, float* %arrayidx.3, align 4
-  %arrayidx1.3 = getelementptr inbounds i32, i32* %b, i32 %inc.2
-  %21 = load i32, i32* %arrayidx1.3, align 4
-  %conv.3 = sitofp i32 %21 to float
-  %mul.3 = fmul float %20, %conv.3
-  %arrayidx2.3 = getelementptr inbounds float, float* %c, i32 %inc.2
-  store float %mul.3, float* %arrayidx2.3, align 4
+  %arrayidx.3 = getelementptr inbounds float, ptr %a, i32 %inc.2
+  %17 = load float, ptr %arrayidx.3, align 4
+  %arrayidx1.3 = getelementptr inbounds i32, ptr %b, i32 %inc.2
+  %18 = load i32, ptr %arrayidx1.3, align 4
+  %conv.3 = sitofp i32 %18 to float
+  %mul.3 = fmul float %17, %conv.3
+  %arrayidx2.3 = getelementptr inbounds float, ptr %c, i32 %inc.2
+  store float %mul.3, ptr %arrayidx2.3, align 4
   %inc.3 = add nuw i32 %i.09, 4
   %exitcond.3 = icmp eq i32 %inc.3, %N
   br i1 %exitcond.3, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @float_int_int_mul(i32* nocapture readonly %a, i32* nocapture readonly %b, float* nocapture %c, i32 %N) {
+define arm_aapcs_vfpcc void @float_int_int_mul(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture %c, i32 %N) {
 ; CHECK-LABEL: float_int_int_mul:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r4, r5, r6, lr}
@@ -916,20 +904,17 @@ vector.ph:                                        ; preds = %for.body.preheader
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %a, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = getelementptr inbounds i32, i32* %b, i32 %index
-  %3 = bitcast i32* %2 to <4 x i32>*
-  %wide.load10 = load <4 x i32>, <4 x i32>* %3, align 4
-  %4 = mul nsw <4 x i32> %wide.load10, %wide.load
-  %5 = sitofp <4 x i32> %4 to <4 x float>
-  %6 = getelementptr inbounds float, float* %c, i32 %index
-  %7 = bitcast float* %6 to <4 x float>*
-  store <4 x float> %5, <4 x float>* %7, align 4
+  %0 = getelementptr inbounds i32, ptr %a, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.load10 = load <4 x i32>, ptr %1, align 4
+  %2 = mul nsw <4 x i32> %wide.load10, %wide.load
+  %3 = sitofp <4 x i32> %2 to <4 x float>
+  %4 = getelementptr inbounds float, ptr %c, i32 %index
+  store <4 x float> %3, ptr %4, align 4
   %index.next = add i32 %index, 4
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %middle.block, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -940,20 +925,20 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader11, %for.body
   %i.09 = phi i32 [ %inc, %for.body ], [ %i.09.ph, %for.body.preheader11 ]
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.09
-  %9 = load i32, i32* %arrayidx, align 4
-  %arrayidx1 = getelementptr inbounds i32, i32* %b, i32 %i.09
-  %10 = load i32, i32* %arrayidx1, align 4
-  %mul = mul nsw i32 %10, %9
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %i.09
+  %6 = load i32, ptr %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %b, i32 %i.09
+  %7 = load i32, ptr %arrayidx1, align 4
+  %mul = mul nsw i32 %7, %6
   %conv = sitofp i32 %mul to float
-  %arrayidx2 = getelementptr inbounds float, float* %c, i32 %i.09
-  store float %conv, float* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %c, i32 %i.09
+  store float %conv, ptr %arrayidx2, align 4
   %inc = add nuw i32 %i.09, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @half_half_mul(half* nocapture readonly %a, half* nocapture readonly %b, float* nocapture %c, i32 %N) {
+define arm_aapcs_vfpcc void @half_half_mul(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture %c, i32 %N) {
 ; CHECK-LABEL: half_half_mul:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, lr}
@@ -1030,20 +1015,17 @@ vector.ph:                                        ; preds = %for.body.preheader
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds half, half* %a, i32 %index
-  %1 = bitcast half* %0 to <4 x half>*
-  %wide.load = load <4 x half>, <4 x half>* %1, align 2
-  %2 = getelementptr inbounds half, half* %b, i32 %index
-  %3 = bitcast half* %2 to <4 x half>*
-  %wide.load10 = load <4 x half>, <4 x half>* %3, align 2
-  %4 = fmul <4 x half> %wide.load, %wide.load10
-  %5 = fpext <4 x half> %4 to <4 x float>
-  %6 = getelementptr inbounds float, float* %c, i32 %index
-  %7 = bitcast float* %6 to <4 x float>*
-  store <4 x float> %5, <4 x float>* %7, align 4
+  %0 = getelementptr inbounds half, ptr %a, i32 %index
+  %wide.load = load <4 x half>, ptr %0, align 2
+  %1 = getelementptr inbounds half, ptr %b, i32 %index
+  %wide.load10 = load <4 x half>, ptr %1, align 2
+  %2 = fmul <4 x half> %wide.load, %wide.load10
+  %3 = fpext <4 x half> %2 to <4 x float>
+  %4 = getelementptr inbounds float, ptr %c, i32 %index
+  store <4 x float> %3, ptr %4, align 4
   %index.next = add i32 %index, 4
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %middle.block, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -1054,20 +1036,20 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader11, %for.body
   %i.09 = phi i32 [ %inc, %for.body ], [ %i.09.ph, %for.body.preheader11 ]
-  %arrayidx = getelementptr inbounds half, half* %a, i32 %i.09
-  %9 = load half, half* %arrayidx, align 2
-  %arrayidx1 = getelementptr inbounds half, half* %b, i32 %i.09
-  %10 = load half, half* %arrayidx1, align 2
-  %mul = fmul half %9, %10
+  %arrayidx = getelementptr inbounds half, ptr %a, i32 %i.09
+  %6 = load half, ptr %arrayidx, align 2
+  %arrayidx1 = getelementptr inbounds half, ptr %b, i32 %i.09
+  %7 = load half, ptr %arrayidx1, align 2
+  %mul = fmul half %6, %7
   %conv = fpext half %mul to float
-  %arrayidx2 = getelementptr inbounds float, float* %c, i32 %i.09
-  store float %conv, float* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %c, i32 %i.09
+  store float %conv, ptr %arrayidx2, align 4
   %inc = add nuw i32 %i.09, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @half_half_add(half* nocapture readonly %a, half* nocapture readonly %b, float* nocapture %c, i32 %N) {
+define arm_aapcs_vfpcc void @half_half_add(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture %c, i32 %N) {
 ; CHECK-LABEL: half_half_add:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, lr}
@@ -1144,20 +1126,17 @@ vector.ph:                                        ; preds = %for.body.preheader
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds half, half* %a, i32 %index
-  %1 = bitcast half* %0 to <4 x half>*
-  %wide.load = load <4 x half>, <4 x half>* %1, align 2
-  %2 = getelementptr inbounds half, half* %b, i32 %index
-  %3 = bitcast half* %2 to <4 x half>*
-  %wide.load10 = load <4 x half>, <4 x half>* %3, align 2
-  %4 = fadd <4 x half> %wide.load, %wide.load10
-  %5 = fpext <4 x half> %4 to <4 x float>
-  %6 = getelementptr inbounds float, float* %c, i32 %index
-  %7 = bitcast float* %6 to <4 x float>*
-  store <4 x float> %5, <4 x float>* %7, align 4
+  %0 = getelementptr inbounds half, ptr %a, i32 %index
+  %wide.load = load <4 x half>, ptr %0, align 2
+  %1 = getelementptr inbounds half, ptr %b, i32 %index
+  %wide.load10 = load <4 x half>, ptr %1, align 2
+  %2 = fadd <4 x half> %wide.load, %wide.load10
+  %3 = fpext <4 x half> %2 to <4 x float>
+  %4 = getelementptr inbounds float, ptr %c, i32 %index
+  store <4 x float> %3, ptr %4, align 4
   %index.next = add i32 %index, 4
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %middle.block, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -1168,20 +1147,20 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader11, %for.body
   %i.09 = phi i32 [ %inc, %for.body ], [ %i.09.ph, %for.body.preheader11 ]
-  %arrayidx = getelementptr inbounds half, half* %a, i32 %i.09
-  %9 = load half, half* %arrayidx, align 2
-  %arrayidx1 = getelementptr inbounds half, half* %b, i32 %i.09
-  %10 = load half, half* %arrayidx1, align 2
-  %add = fadd half %9, %10
+  %arrayidx = getelementptr inbounds half, ptr %a, i32 %i.09
+  %6 = load half, ptr %arrayidx, align 2
+  %arrayidx1 = getelementptr inbounds half, ptr %b, i32 %i.09
+  %7 = load half, ptr %arrayidx1, align 2
+  %add = fadd half %6, %7
   %conv = fpext half %add to float
-  %arrayidx2 = getelementptr inbounds float, float* %c, i32 %i.09
-  store float %conv, float* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %c, i32 %i.09
+  store float %conv, ptr %arrayidx2, align 4
   %inc = add nuw i32 %i.09, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @half_half_sub(half* nocapture readonly %a, half* nocapture readonly %b, float* nocapture %c, i32 %N) {
+define arm_aapcs_vfpcc void @half_half_sub(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture %c, i32 %N) {
 ; CHECK-LABEL: half_half_sub:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, lr}
@@ -1258,20 +1237,17 @@ vector.ph:                                        ; preds = %for.body.preheader
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds half, half* %a, i32 %index
-  %1 = bitcast half* %0 to <4 x half>*
-  %wide.load = load <4 x half>, <4 x half>* %1, align 2
-  %2 = getelementptr inbounds half, half* %b, i32 %index
-  %3 = bitcast half* %2 to <4 x half>*
-  %wide.load10 = load <4 x half>, <4 x half>* %3, align 2
-  %4 = fsub <4 x half> %wide.load, %wide.load10
-  %5 = fpext <4 x half> %4 to <4 x float>
-  %6 = getelementptr inbounds float, float* %c, i32 %index
-  %7 = bitcast float* %6 to <4 x float>*
-  store <4 x float> %5, <4 x float>* %7, align 4
+  %0 = getelementptr inbounds half, ptr %a, i32 %index
+  %wide.load = load <4 x half>, ptr %0, align 2
+  %1 = getelementptr inbounds half, ptr %b, i32 %index
+  %wide.load10 = load <4 x half>, ptr %1, align 2
+  %2 = fsub <4 x half> %wide.load, %wide.load10
+  %3 = fpext <4 x half> %2 to <4 x float>
+  %4 = getelementptr inbounds float, ptr %c, i32 %index
+  store <4 x float> %3, ptr %4, align 4
   %index.next = add i32 %index, 4
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %middle.block, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -1282,20 +1258,20 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader11, %for.body
   %i.09 = phi i32 [ %inc, %for.body ], [ %i.09.ph, %for.body.preheader11 ]
-  %arrayidx = getelementptr inbounds half, half* %a, i32 %i.09
-  %9 = load half, half* %arrayidx, align 2
-  %arrayidx1 = getelementptr inbounds half, half* %b, i32 %i.09
-  %10 = load half, half* %arrayidx1, align 2
-  %sub = fsub half %9, %10
+  %arrayidx = getelementptr inbounds half, ptr %a, i32 %i.09
+  %6 = load half, ptr %arrayidx, align 2
+  %arrayidx1 = getelementptr inbounds half, ptr %b, i32 %i.09
+  %7 = load half, ptr %arrayidx1, align 2
+  %sub = fsub half %6, %7
   %conv = fpext half %sub to float
-  %arrayidx2 = getelementptr inbounds float, float* %c, i32 %i.09
-  store float %conv, float* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %c, i32 %i.09
+  store float %conv, ptr %arrayidx2, align 4
   %inc = add nuw i32 %i.09, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @half_short_mul(half* nocapture readonly %a, i16* nocapture readonly %b, float* nocapture %c, i32 %N) {
+define arm_aapcs_vfpcc void @half_short_mul(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr nocapture %c, i32 %N) {
 ; CHECK-LABEL: half_short_mul:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -1380,21 +1356,18 @@ vector.ph:                                        ; preds = %for.body.preheader
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds half, half* %a, i32 %index
-  %1 = bitcast half* %0 to <4 x half>*
-  %wide.load = load <4 x half>, <4 x half>* %1, align 2
-  %2 = getelementptr inbounds i16, i16* %b, i32 %index
-  %3 = bitcast i16* %2 to <4 x i16>*
-  %wide.load12 = load <4 x i16>, <4 x i16>* %3, align 2
-  %4 = sitofp <4 x i16> %wide.load12 to <4 x half>
-  %5 = fmul <4 x half> %wide.load, %4
-  %6 = fpext <4 x half> %5 to <4 x float>
-  %7 = getelementptr inbounds float, float* %c, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  store <4 x float> %6, <4 x float>* %8, align 4
+  %0 = getelementptr inbounds half, ptr %a, i32 %index
+  %wide.load = load <4 x half>, ptr %0, align 2
+  %1 = getelementptr inbounds i16, ptr %b, i32 %index
+  %wide.load12 = load <4 x i16>, ptr %1, align 2
+  %2 = sitofp <4 x i16> %wide.load12 to <4 x half>
+  %3 = fmul <4 x half> %wide.load, %2
+  %4 = fpext <4 x half> %3 to <4 x float>
+  %5 = getelementptr inbounds float, ptr %c, i32 %index
+  store <4 x float> %4, ptr %5, align 4
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n.vec
-  br i1 %9, label %middle.block, label %vector.body
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -1405,21 +1378,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader13, %for.body
   %i.011 = phi i32 [ %inc, %for.body ], [ %i.011.ph, %for.body.preheader13 ]
-  %arrayidx = getelementptr inbounds half, half* %a, i32 %i.011
-  %10 = load half, half* %arrayidx, align 2
-  %arrayidx1 = getelementptr inbounds i16, i16* %b, i32 %i.011
-  %11 = load i16, i16* %arrayidx1, align 2
-  %conv2 = sitofp i16 %11 to half
-  %mul = fmul half %10, %conv2
+  %arrayidx = getelementptr inbounds half, ptr %a, i32 %i.011
+  %7 = load half, ptr %arrayidx, align 2
+  %arrayidx1 = getelementptr inbounds i16, ptr %b, i32 %i.011
+  %8 = load i16, ptr %arrayidx1, align 2
+  %conv2 = sitofp i16 %8 to half
+  %mul = fmul half %7, %conv2
   %conv3 = fpext half %mul to float
-  %arrayidx4 = getelementptr inbounds float, float* %c, i32 %i.011
-  store float %conv3, float* %arrayidx4, align 4
+  %arrayidx4 = getelementptr inbounds float, ptr %c, i32 %i.011
+  store float %conv3, ptr %arrayidx4, align 4
   %inc = add nuw i32 %i.011, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc float @half_half_mac(half* nocapture readonly %a, half* nocapture readonly %b, i32 %N) {
+define arm_aapcs_vfpcc float @half_half_mac(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N) {
 ; CHECK-LABEL: half_half_mac:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r4, r5, r7, lr}
@@ -1517,10 +1490,10 @@ for.body.epil:                                    ; preds = %for.cond.cleanup.lo
   %i.010.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.010.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
   %res.09.epil = phi float [ %add.epil, %for.body.epil ], [ %res.09.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
   %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
-  %arrayidx.epil = getelementptr inbounds half, half* %a, i32 %i.010.epil
-  %2 = load half, half* %arrayidx.epil, align 2
-  %arrayidx1.epil = getelementptr inbounds half, half* %b, i32 %i.010.epil
-  %3 = load half, half* %arrayidx1.epil, align 2
+  %arrayidx.epil = getelementptr inbounds half, ptr %a, i32 %i.010.epil
+  %2 = load half, ptr %arrayidx.epil, align 2
+  %arrayidx1.epil = getelementptr inbounds half, ptr %b, i32 %i.010.epil
+  %3 = load half, ptr %arrayidx1.epil, align 2
   %mul.epil = fmul half %2, %3
   %conv.epil = fpext half %mul.epil to float
   %add.epil = fadd float %res.09.epil, %conv.epil
@@ -1537,34 +1510,34 @@ for.body:                                         ; preds = %for.body, %for.body
   %i.010 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
   %res.09 = phi float [ 0.000000e+00, %for.body.preheader.new ], [ %add.3, %for.body ]
   %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
-  %arrayidx = getelementptr inbounds half, half* %a, i32 %i.010
-  %4 = load half, half* %arrayidx, align 2
-  %arrayidx1 = getelementptr inbounds half, half* %b, i32 %i.010
-  %5 = load half, half* %arrayidx1, align 2
+  %arrayidx = getelementptr inbounds half, ptr %a, i32 %i.010
+  %4 = load half, ptr %arrayidx, align 2
+  %arrayidx1 = getelementptr inbounds half, ptr %b, i32 %i.010
+  %5 = load half, ptr %arrayidx1, align 2
   %mul = fmul half %4, %5
   %conv = fpext half %mul to float
   %add = fadd float %res.09, %conv
   %inc = or i32 %i.010, 1
-  %arrayidx.1 = getelementptr inbounds half, half* %a, i32 %inc
-  %6 = load half, half* %arrayidx.1, align 2
-  %arrayidx1.1 = getelementptr inbounds half, half* %b, i32 %inc
-  %7 = load half, half* %arrayidx1.1, align 2
+  %arrayidx.1 = getelementptr inbounds half, ptr %a, i32 %inc
+  %6 = load half, ptr %arrayidx.1, align 2
+  %arrayidx1.1 = getelementptr inbounds half, ptr %b, i32 %inc
+  %7 = load half, ptr %arrayidx1.1, align 2
   %mul.1 = fmul half %6, %7
   %conv.1 = fpext half %mul.1 to float
   %add.1 = fadd float %add, %conv.1
   %inc.1 = or i32 %i.010, 2
-  %arrayidx.2 = getelementptr inbounds half, half* %a, i32 %inc.1
-  %8 = load half, half* %arrayidx.2, align 2
-  %arrayidx1.2 = getelementptr inbounds half, half* %b, i32 %inc.1
-  %9 = load half, half* %arrayidx1.2, align 2
+  %arrayidx.2 = getelementptr inbounds half, ptr %a, i32 %inc.1
+  %8 = load half, ptr %arrayidx.2, align 2
+  %arrayidx1.2 = getelementptr inbounds half, ptr %b, i32 %inc.1
+  %9 = load half, ptr %arrayidx1.2, align 2
   %mul.2 = fmul half %8, %9
   %conv.2 = fpext half %mul.2 to float
   %add.2 = fadd float %add.1, %conv.2
   %inc.2 = or i32 %i.010, 3
-  %arrayidx.3 = getelementptr inbounds half, half* %a, i32 %inc.2
-  %10 = load half, half* %arrayidx.3, align 2
-  %arrayidx1.3 = getelementptr inbounds half, half* %b, i32 %inc.2
-  %11 = load half, half* %arrayidx1.3, align 2
+  %arrayidx.3 = getelementptr inbounds half, ptr %a, i32 %inc.2
+  %10 = load half, ptr %arrayidx.3, align 2
+  %arrayidx1.3 = getelementptr inbounds half, ptr %b, i32 %inc.2
+  %11 = load half, ptr %arrayidx1.3, align 2
   %mul.3 = fmul half %10, %11
   %conv.3 = fpext half %mul.3 to float
   %add.3 = fadd float %add.2, %conv.3
@@ -1574,7 +1547,7 @@ for.body:                                         ; preds = %for.body, %for.body
   br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
 }
 
-define arm_aapcs_vfpcc float @half_half_acc(half* nocapture readonly %a, half* nocapture readonly %b, i32 %N) {
+define arm_aapcs_vfpcc float @half_half_acc(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N) {
 ; CHECK-LABEL: half_half_acc:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r4, r5, r7, lr}
@@ -1672,10 +1645,10 @@ for.body.epil:                                    ; preds = %for.cond.cleanup.lo
   %i.011.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.011.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
   %res.010.epil = phi float [ %add2.epil, %for.body.epil ], [ %res.010.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
   %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
-  %arrayidx.epil = getelementptr inbounds half, half* %a, i32 %i.011.epil
-  %2 = load half, half* %arrayidx.epil, align 2
-  %arrayidx1.epil = getelementptr inbounds half, half* %b, i32 %i.011.epil
-  %3 = load half, half* %arrayidx1.epil, align 2
+  %arrayidx.epil = getelementptr inbounds half, ptr %a, i32 %i.011.epil
+  %2 = load half, ptr %arrayidx.epil, align 2
+  %arrayidx1.epil = getelementptr inbounds half, ptr %b, i32 %i.011.epil
+  %3 = load half, ptr %arrayidx1.epil, align 2
   %add.epil = fadd half %2, %3
   %conv.epil = fpext half %add.epil to float
   %add2.epil = fadd float %res.010.epil, %conv.epil
@@ -1692,34 +1665,34 @@ for.body:                                         ; preds = %for.body, %for.body
   %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
   %res.010 = phi float [ 0.000000e+00, %for.body.preheader.new ], [ %add2.3, %for.body ]
   %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
-  %arrayidx = getelementptr inbounds half, half* %a, i32 %i.011
-  %4 = load half, half* %arrayidx, align 2
-  %arrayidx1 = getelementptr inbounds half, half* %b, i32 %i.011
-  %5 = load half, half* %arrayidx1, align 2
+  %arrayidx = getelementptr inbounds half, ptr %a, i32 %i.011
+  %4 = load half, ptr %arrayidx, align 2
+  %arrayidx1 = getelementptr inbounds half, ptr %b, i32 %i.011
+  %5 = load half, ptr %arrayidx1, align 2
   %add = fadd half %4, %5
   %conv = fpext half %add to float
   %add2 = fadd float %res.010, %conv
   %inc = or i32 %i.011, 1
-  %arrayidx.1 = getelementptr inbounds half, half* %a, i32 %inc
-  %6 = load half, half* %arrayidx.1, align 2
-  %arrayidx1.1 = getelementptr inbounds half, half* %b, i32 %inc
-  %7 = load half, half* %arrayidx1.1, align 2
+  %arrayidx.1 = getelementptr inbounds half, ptr %a, i32 %inc
+  %6 = load half, ptr %arrayidx.1, align 2
+  %arrayidx1.1 = getelementptr inbounds half, ptr %b, i32 %inc
+  %7 = load half, ptr %arrayidx1.1, align 2
   %add.1 = fadd half %6, %7
   %conv.1 = fpext half %add.1 to float
   %add2.1 = fadd float %add2, %conv.1
   %inc.1 = or i32 %i.011, 2
-  %arrayidx.2 = getelementptr inbounds half, half* %a, i32 %inc.1
-  %8 = load half, half* %arrayidx.2, align 2
-  %arrayidx1.2 = getelementptr inbounds half, half* %b, i32 %inc.1
-  %9 = load half, half* %arrayidx1.2, align 2
+  %arrayidx.2 = getelementptr inbounds half, ptr %a, i32 %inc.1
+  %8 = load half, ptr %arrayidx.2, align 2
+  %arrayidx1.2 = getelementptr inbounds half, ptr %b, i32 %inc.1
+  %9 = load half, ptr %arrayidx1.2, align 2
   %add.2 = fadd half %8, %9
   %conv.2 = fpext half %add.2 to float
   %add2.2 = fadd float %add2.1, %conv.2
   %inc.2 = or i32 %i.011, 3
-  %arrayidx.3 = getelementptr inbounds half, half* %a, i32 %inc.2
-  %10 = load half, half* %arrayidx.3, align 2
-  %arrayidx1.3 = getelementptr inbounds half, half* %b, i32 %inc.2
-  %11 = load half, half* %arrayidx1.3, align 2
+  %arrayidx.3 = getelementptr inbounds half, ptr %a, i32 %inc.2
+  %10 = load half, ptr %arrayidx.3, align 2
+  %arrayidx1.3 = getelementptr inbounds half, ptr %b, i32 %inc.2
+  %11 = load half, ptr %arrayidx1.3, align 2
   %add.3 = fadd half %10, %11
   %conv.3 = fpext half %add.3 to float
   %add2.3 = fadd float %add2.2, %conv.3
@@ -1729,7 +1702,7 @@ for.body:                                         ; preds = %for.body, %for.body
   br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
 }
 
-define arm_aapcs_vfpcc float @half_short_mac(half* nocapture readonly %a, i16* nocapture readonly %b, i32 %N) {
+define arm_aapcs_vfpcc float @half_short_mac(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N) {
 ; CHECK-LABEL: half_short_mac:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r4, r5, r6, lr}
@@ -1835,10 +1808,10 @@ for.body.epil:                                    ; preds = %for.cond.cleanup.lo
   %i.012.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.012.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
   %res.011.epil = phi float [ %add.epil, %for.body.epil ], [ %res.011.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
   %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
-  %arrayidx.epil = getelementptr inbounds half, half* %a, i32 %i.012.epil
-  %2 = load half, half* %arrayidx.epil, align 2
-  %arrayidx1.epil = getelementptr inbounds i16, i16* %b, i32 %i.012.epil
-  %3 = load i16, i16* %arrayidx1.epil, align 2
+  %arrayidx.epil = getelementptr inbounds half, ptr %a, i32 %i.012.epil
+  %2 = load half, ptr %arrayidx.epil, align 2
+  %arrayidx1.epil = getelementptr inbounds i16, ptr %b, i32 %i.012.epil
+  %3 = load i16, ptr %arrayidx1.epil, align 2
   %conv2.epil = sitofp i16 %3 to half
   %mul.epil = fmul half %2, %conv2.epil
   %conv3.epil = fpext half %mul.epil to float
@@ -1856,37 +1829,37 @@ for.body:                                         ; preds = %for.body, %for.body
   %i.012 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
   %res.011 = phi float [ 0.000000e+00, %for.body.preheader.new ], [ %add.3, %for.body ]
   %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
-  %arrayidx = getelementptr inbounds half, half* %a, i32 %i.012
-  %4 = load half, half* %arrayidx, align 2
-  %arrayidx1 = getelementptr inbounds i16, i16* %b, i32 %i.012
-  %5 = load i16, i16* %arrayidx1, align 2
+  %arrayidx = getelementptr inbounds half, ptr %a, i32 %i.012
+  %4 = load half, ptr %arrayidx, align 2
+  %arrayidx1 = getelementptr inbounds i16, ptr %b, i32 %i.012
+  %5 = load i16, ptr %arrayidx1, align 2
   %conv2 = sitofp i16 %5 to half
   %mul = fmul half %4, %conv2
   %conv3 = fpext half %mul to float
   %add = fadd float %res.011, %conv3
   %inc = or i32 %i.012, 1
-  %arrayidx.1 = getelementptr inbounds half, half* %a, i32 %inc
-  %6 = load half, half* %arrayidx.1, align 2
-  %arrayidx1.1 = getelementptr inbounds i16, i16* %b, i32 %inc
-  %7 = load i16, i16* %arrayidx1.1, align 2
+  %arrayidx.1 = getelementptr inbounds half, ptr %a, i32 %inc
+  %6 = load half, ptr %arrayidx.1, align 2
+  %arrayidx1.1 = getelementptr inbounds i16, ptr %b, i32 %inc
+  %7 = load i16, ptr %arrayidx1.1, align 2
   %conv2.1 = sitofp i16 %7 to half
   %mul.1 = fmul half %6, %conv2.1
   %conv3.1 = fpext half %mul.1 to float
   %add.1 = fadd float %add, %conv3.1
   %inc.1 = or i32 %i.012, 2
-  %arrayidx.2 = getelementptr inbounds half, half* %a, i32 %inc.1
-  %8 = load half, half* %arrayidx.2, align 2
-  %arrayidx1.2 = getelementptr inbounds i16, i16* %b, i32 %inc.1
-  %9 = load i16, i16* %arrayidx1.2, align 2
+  %arrayidx.2 = getelementptr inbounds half, ptr %a, i32 %inc.1
+  %8 = load half, ptr %arrayidx.2, align 2
+  %arrayidx1.2 = getelementptr inbounds i16, ptr %b, i32 %inc.1
+  %9 = load i16, ptr %arrayidx1.2, align 2
   %conv2.2 = sitofp i16 %9 to half
   %mul.2 = fmul half %8, %conv2.2
   %conv3.2 = fpext half %mul.2 to float
   %add.2 = fadd float %add.1, %conv3.2
   %inc.2 = or i32 %i.012, 3
-  %arrayidx.3 = getelementptr inbounds half, half* %a, i32 %inc.2
-  %10 = load half, half* %arrayidx.3, align 2
-  %arrayidx1.3 = getelementptr inbounds i16, i16* %b, i32 %inc.2
-  %11 = load i16, i16* %arrayidx1.3, align 2
+  %arrayidx.3 = getelementptr inbounds half, ptr %a, i32 %inc.2
+  %10 = load half, ptr %arrayidx.3, align 2
+  %arrayidx1.3 = getelementptr inbounds i16, ptr %b, i32 %inc.2
+  %11 = load i16, ptr %arrayidx1.3, align 2
   %conv2.3 = sitofp i16 %11 to half
   %mul.3 = fmul half %10, %conv2.3
   %conv3.3 = fpext half %mul.3 to float

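The bulk of these test updates follow one mechanical pattern: under opaque pointers, the bitcast that reinterpreted a typed element pointer as a vector pointer is no longer needed, so it is dropped, and the unnamed SSA values (%0, %1, ...) renumber to close the gap. A minimal sketch of the shape of the change, using illustrative function names that do not appear in the tests:

  ; Typed pointers: a bitcast is required before the vector load.
  define <4 x half> @load_typed(half* %p) {
    %cast = bitcast half* %p to <4 x half>*
    %v = load <4 x half>, <4 x half>* %cast, align 2
    ret <4 x half> %v
  }

  ; Opaque pointers: the load takes the incoming ptr directly,
  ; so the surrounding unnamed values renumber.
  define <4 x half> @load_opaque(ptr %p) {
    %v = load <4 x half>, ptr %p, align 2
    ret <4 x half> %v
  }
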
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/nested.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/nested.ll
index 6b71a070d651b..0ddbbb4d53dd1 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/nested.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/nested.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -mtriple=armv8.1m.main -mattr=+mve -S -mve-tail-predication -tail-predication=enabled %s -o - | FileCheck %s
 
-define void @mat_vec_sext_i16(i16** nocapture readonly %A, i16* nocapture readonly %B, i32* noalias nocapture %C, i32 %N) {
+define void @mat_vec_sext_i16(ptr nocapture readonly %A, ptr nocapture readonly %B, ptr noalias nocapture %C, i32 %N) {
 ; CHECK-LABEL: @mat_vec_sext_i16(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP24:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -15,10 +15,10 @@ define void @mat_vec_sext_i16(i16** nocapture readonly %A, i16* nocapture readon
 ; CHECK-NEXT:    br label [[FOR_COND1_PREHEADER_US:%.*]]
 ; CHECK:       for.cond1.preheader.us:
 ; CHECK-NEXT:    [[I_025_US:%.*]] = phi i32 [ [[INC10_US:%.*]], [[MIDDLE_BLOCK:%.*]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
-; CHECK-NEXT:    [[ARRAYIDX_US:%.*]] = getelementptr inbounds i16*, i16** [[A:%.*]], i32 [[I_025_US]]
-; CHECK-NEXT:    [[TT3:%.*]] = load i16*, i16** [[ARRAYIDX_US]], align 4
-; CHECK-NEXT:    [[ARRAYIDX8_US:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i32 [[I_025_US]]
-; CHECK-NEXT:    [[ARRAYIDX8_PROMOTED_US:%.*]] = load i32, i32* [[ARRAYIDX8_US]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_US:%.*]] = getelementptr inbounds ptr, ptr [[A:%.*]], i32 [[I_025_US]]
+; CHECK-NEXT:    [[TT3:%.*]] = load ptr, ptr [[ARRAYIDX_US]], align 4
+; CHECK-NEXT:    [[ARRAYIDX8_US:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i32 [[I_025_US]]
+; CHECK-NEXT:    [[ARRAYIDX8_PROMOTED_US:%.*]] = load i32, ptr [[ARRAYIDX8_US]], align 4
 ; CHECK-NEXT:    [[TT4:%.*]] = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 [[ARRAYIDX8_PROMOTED_US]], i32 0
 ; CHECK-NEXT:    [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TT2]])
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
@@ -27,15 +27,13 @@ define void @mat_vec_sext_i16(i16** nocapture readonly %A, i16* nocapture readon
 ; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TT4]], [[FOR_COND1_PREHEADER_US]] ], [ [[TT14:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TT5:%.*]] = phi i32 [ [[START]], [[FOR_COND1_PREHEADER_US]] ], [ [[TT15:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ [[N]], [[FOR_COND1_PREHEADER_US]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[TT6:%.*]] = getelementptr inbounds i16, i16* [[TT3]], i32 [[INDEX]]
+; CHECK-NEXT:    [[TT6:%.*]] = getelementptr inbounds i16, ptr [[TT3]], i32 [[INDEX]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP0]])
 ; CHECK-NEXT:    [[TMP2]] = sub i32 [[TMP0]], 4
-; CHECK-NEXT:    [[TT8:%.*]] = bitcast i16* [[TT6]] to <4 x i16>*
-; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* [[TT8]], i32 2, <4 x i1> [[TMP1]], <4 x i16> undef)
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[TT6]], i32 2, <4 x i1> [[TMP1]], <4 x i16> undef)
 ; CHECK-NEXT:    [[TT9:%.*]] = sext <4 x i16> [[WIDE_MASKED_LOAD]] to <4 x i32>
-; CHECK-NEXT:    [[TT10:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i32 [[INDEX]]
-; CHECK-NEXT:    [[TT11:%.*]] = bitcast i16* [[TT10]] to <4 x i16>*
-; CHECK-NEXT:    [[WIDE_MASKED_LOAD30:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* [[TT11]], i32 2, <4 x i1> [[TMP1]], <4 x i16> undef)
+; CHECK-NEXT:    [[TT10:%.*]] = getelementptr inbounds i16, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD30:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[TT10]], i32 2, <4 x i1> [[TMP1]], <4 x i16> undef)
 ; CHECK-NEXT:    [[TT12:%.*]] = sext <4 x i16> [[WIDE_MASKED_LOAD30]] to <4 x i32>
 ; CHECK-NEXT:    [[TT13:%.*]] = mul nsw <4 x i32> [[TT12]], [[TT9]]
 ; CHECK-NEXT:    [[TT14]] = add nsw <4 x i32> [[TT13]], [[VEC_PHI]]
@@ -46,7 +44,7 @@ define void @mat_vec_sext_i16(i16** nocapture readonly %A, i16* nocapture readon
 ; CHECK:       middle.block:
 ; CHECK-NEXT:    [[TT17:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TT14]], <4 x i32> [[VEC_PHI]]
 ; CHECK-NEXT:    [[TT18:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TT17]])
-; CHECK-NEXT:    store i32 [[TT18]], i32* [[ARRAYIDX8_US]], align 4
+; CHECK-NEXT:    store i32 [[TT18]], ptr [[ARRAYIDX8_US]], align 4
 ; CHECK-NEXT:    [[INC10_US]] = add nuw i32 [[I_025_US]], 1
 ; CHECK-NEXT:    [[EXITCOND27:%.*]] = icmp eq i32 [[INC10_US]], [[N]]
 ; CHECK-NEXT:    br i1 [[EXITCOND27]], label [[FOR_COND_CLEANUP]], label [[FOR_COND1_PREHEADER_US]]
@@ -67,10 +65,10 @@ for.cond1.preheader.us.preheader:                 ; preds = %entry
 
 for.cond1.preheader.us:                           ; preds = %middle.block, %for.cond1.preheader.us.preheader
   %i.025.us = phi i32 [ %inc10.us, %middle.block ], [ 0, %for.cond1.preheader.us.preheader ]
-  %arrayidx.us = getelementptr inbounds i16*, i16** %A, i32 %i.025.us
-  %tt3 = load i16*, i16** %arrayidx.us, align 4
-  %arrayidx8.us = getelementptr inbounds i32, i32* %C, i32 %i.025.us
-  %arrayidx8.promoted.us = load i32, i32* %arrayidx8.us, align 4
+  %arrayidx.us = getelementptr inbounds ptr, ptr %A, i32 %i.025.us
+  %tt3 = load ptr, ptr %arrayidx.us, align 4
+  %arrayidx8.us = getelementptr inbounds i32, ptr %C, i32 %i.025.us
+  %arrayidx8.promoted.us = load i32, ptr %arrayidx8.us, align 4
   %tt4 = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 %arrayidx8.promoted.us, i32 0
   %start = call i32 @llvm.start.loop.iterations.i32(i32 %tt2)
   br label %vector.body
@@ -79,14 +77,12 @@ vector.body:                                      ; preds = %vector.body, %for.c
   %index = phi i32 [ 0, %for.cond1.preheader.us ], [ %index.next, %vector.body ]
   %vec.phi = phi <4 x i32> [ %tt4, %for.cond1.preheader.us ], [ %tt14, %vector.body ]
   %tt5 = phi i32 [ %start, %for.cond1.preheader.us ], [ %tt15, %vector.body ]
-  %tt6 = getelementptr inbounds i16, i16* %tt3, i32 %index
+  %tt6 = getelementptr inbounds i16, ptr %tt3, i32 %index
   %tt7 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %tt8 = bitcast i16* %tt6 to <4 x i16>*
-  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %tt8, i32 2, <4 x i1> %tt7, <4 x i16> undef)
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %tt6, i32 2, <4 x i1> %tt7, <4 x i16> undef)
   %tt9 = sext <4 x i16> %wide.masked.load to <4 x i32>
-  %tt10 = getelementptr inbounds i16, i16* %B, i32 %index
-  %tt11 = bitcast i16* %tt10 to <4 x i16>*
-  %wide.masked.load30 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %tt11, i32 2, <4 x i1> %tt7, <4 x i16> undef)
+  %tt10 = getelementptr inbounds i16, ptr %B, i32 %index
+  %wide.masked.load30 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %tt10, i32 2, <4 x i1> %tt7, <4 x i16> undef)
   %tt12 = sext <4 x i16> %wide.masked.load30 to <4 x i32>
   %tt13 = mul nsw <4 x i32> %tt12, %tt9
   %tt14 = add nsw <4 x i32> %tt13, %vec.phi
@@ -98,7 +94,7 @@ vector.body:                                      ; preds = %vector.body, %for.c
 middle.block:                                     ; preds = %vector.body
   %tt17 = select <4 x i1> %tt7, <4 x i32> %tt14, <4 x i32> %vec.phi
   %tt18 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tt17)
-  store i32 %tt18, i32* %arrayidx8.us, align 4
+  store i32 %tt18, ptr %arrayidx8.us, align 4
   %inc10.us = add nuw i32 %i.025.us, 1
   %exitcond27 = icmp eq i32 %inc10.us, %N
   br i1 %exitcond27, label %for.cond.cleanup, label %for.cond1.preheader.us
@@ -107,7 +103,7 @@ for.cond.cleanup:                                 ; preds = %middle.block, %entr
   ret void
 }
 
-define void @mat_vec_i32(i32** nocapture readonly %A, i32* nocapture readonly %B, i32* noalias nocapture %C, i32 %N) {
+define void @mat_vec_i32(ptr nocapture readonly %A, ptr nocapture readonly %B, ptr noalias nocapture %C, i32 %N) {
 ; CHECK-LABEL: @mat_vec_i32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP23:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -121,10 +117,10 @@ define void @mat_vec_i32(i32** nocapture readonly %A, i32* nocapture readonly %B
 ; CHECK-NEXT:    br label [[FOR_COND1_PREHEADER_US:%.*]]
 ; CHECK:       for.cond1.preheader.us:
 ; CHECK-NEXT:    [[I_024_US:%.*]] = phi i32 [ [[INC9_US:%.*]], [[MIDDLE_BLOCK:%.*]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
-; CHECK-NEXT:    [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32*, i32** [[A:%.*]], i32 [[I_024_US]]
-; CHECK-NEXT:    [[TT3:%.*]] = load i32*, i32** [[ARRAYIDX_US]], align 4
-; CHECK-NEXT:    [[ARRAYIDX7_US:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i32 [[I_024_US]]
-; CHECK-NEXT:    [[ARRAYIDX7_PROMOTED_US:%.*]] = load i32, i32* [[ARRAYIDX7_US]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_US:%.*]] = getelementptr inbounds ptr, ptr [[A:%.*]], i32 [[I_024_US]]
+; CHECK-NEXT:    [[TT3:%.*]] = load ptr, ptr [[ARRAYIDX_US]], align 4
+; CHECK-NEXT:    [[ARRAYIDX7_US:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i32 [[I_024_US]]
+; CHECK-NEXT:    [[ARRAYIDX7_PROMOTED_US:%.*]] = load i32, ptr [[ARRAYIDX7_US]], align 4
 ; CHECK-NEXT:    [[TT4:%.*]] = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 [[ARRAYIDX7_PROMOTED_US]], i32 0
 ; CHECK-NEXT:    [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TT2]])
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
@@ -133,14 +129,12 @@ define void @mat_vec_i32(i32** nocapture readonly %A, i32* nocapture readonly %B
 ; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TT4]], [[FOR_COND1_PREHEADER_US]] ], [ [[TT12:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TT5:%.*]] = phi i32 [ [[START]], [[FOR_COND1_PREHEADER_US]] ], [ [[TT13:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ [[N]], [[FOR_COND1_PREHEADER_US]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[TT6:%.*]] = getelementptr inbounds i32, i32* [[TT3]], i32 [[INDEX]]
+; CHECK-NEXT:    [[TT6:%.*]] = getelementptr inbounds i32, ptr [[TT3]], i32 [[INDEX]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP0]])
 ; CHECK-NEXT:    [[TMP2]] = sub i32 [[TMP0]], 4
-; CHECK-NEXT:    [[TT8:%.*]] = bitcast i32* [[TT6]] to <4 x i32>*
-; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TT8]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
-; CHECK-NEXT:    [[TT9:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[INDEX]]
-; CHECK-NEXT:    [[TT10:%.*]] = bitcast i32* [[TT9]] to <4 x i32>*
-; CHECK-NEXT:    [[WIDE_MASKED_LOAD29:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TT10]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TT6]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
+; CHECK-NEXT:    [[TT9:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[INDEX]]
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD29:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TT9]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
 ; CHECK-NEXT:    [[TT11:%.*]] = mul nsw <4 x i32> [[WIDE_MASKED_LOAD29]], [[WIDE_MASKED_LOAD]]
 ; CHECK-NEXT:    [[TT12]] = add nsw <4 x i32> [[VEC_PHI]], [[TT11]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
@@ -150,7 +144,7 @@ define void @mat_vec_i32(i32** nocapture readonly %A, i32* nocapture readonly %B
 ; CHECK:       middle.block:
 ; CHECK-NEXT:    [[TT15:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TT12]], <4 x i32> [[VEC_PHI]]
 ; CHECK-NEXT:    [[TT16:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TT15]])
-; CHECK-NEXT:    store i32 [[TT16]], i32* [[ARRAYIDX7_US]], align 4
+; CHECK-NEXT:    store i32 [[TT16]], ptr [[ARRAYIDX7_US]], align 4
 ; CHECK-NEXT:    [[INC9_US]] = add nuw i32 [[I_024_US]], 1
 ; CHECK-NEXT:    [[EXITCOND26:%.*]] = icmp eq i32 [[INC9_US]], [[N]]
 ; CHECK-NEXT:    br i1 [[EXITCOND26]], label [[FOR_COND_CLEANUP]], label [[FOR_COND1_PREHEADER_US]]
@@ -171,10 +165,10 @@ for.cond1.preheader.us.preheader:                 ; preds = %entry
 
 for.cond1.preheader.us:                           ; preds = %middle.block, %for.cond1.preheader.us.preheader
   %i.024.us = phi i32 [ %inc9.us, %middle.block ], [ 0, %for.cond1.preheader.us.preheader ]
-  %arrayidx.us = getelementptr inbounds i32*, i32** %A, i32 %i.024.us
-  %tt3 = load i32*, i32** %arrayidx.us, align 4
-  %arrayidx7.us = getelementptr inbounds i32, i32* %C, i32 %i.024.us
-  %arrayidx7.promoted.us = load i32, i32* %arrayidx7.us, align 4
+  %arrayidx.us = getelementptr inbounds ptr, ptr %A, i32 %i.024.us
+  %tt3 = load ptr, ptr %arrayidx.us, align 4
+  %arrayidx7.us = getelementptr inbounds i32, ptr %C, i32 %i.024.us
+  %arrayidx7.promoted.us = load i32, ptr %arrayidx7.us, align 4
   %tt4 = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 %arrayidx7.promoted.us, i32 0
   %start = call i32 @llvm.start.loop.iterations.i32(i32 %tt2)
   br label %vector.body
@@ -183,13 +177,11 @@ vector.body:                                      ; preds = %vector.body, %for.c
   %index = phi i32 [ 0, %for.cond1.preheader.us ], [ %index.next, %vector.body ]
   %vec.phi = phi <4 x i32> [ %tt4, %for.cond1.preheader.us ], [ %tt12, %vector.body ]
   %tt5 = phi i32 [ %start, %for.cond1.preheader.us ], [ %tt13, %vector.body ]
-  %tt6 = getelementptr inbounds i32, i32* %tt3, i32 %index
+  %tt6 = getelementptr inbounds i32, ptr %tt3, i32 %index
   %tt7 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %tt8 = bitcast i32* %tt6 to <4 x i32>*
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tt8, i32 4, <4 x i1> %tt7, <4 x i32> undef)
-  %tt9 = getelementptr inbounds i32, i32* %B, i32 %index
-  %tt10 = bitcast i32* %tt9 to <4 x i32>*
-  %wide.masked.load29 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tt10, i32 4, <4 x i1> %tt7, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tt6, i32 4, <4 x i1> %tt7, <4 x i32> undef)
+  %tt9 = getelementptr inbounds i32, ptr %B, i32 %index
+  %wide.masked.load29 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tt9, i32 4, <4 x i1> %tt7, <4 x i32> undef)
   %tt11 = mul nsw <4 x i32> %wide.masked.load29, %wide.masked.load
   %tt12 = add nsw <4 x i32> %vec.phi, %tt11
   %index.next = add i32 %index, 4
@@ -200,7 +192,7 @@ vector.body:                                      ; preds = %vector.body, %for.c
 middle.block:                                     ; preds = %vector.body
   %tt15 = select <4 x i1> %tt7, <4 x i32> %tt12, <4 x i32> %vec.phi
   %tt16 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tt15)
-  store i32 %tt16, i32* %arrayidx7.us, align 4
+  store i32 %tt16, ptr %arrayidx7.us, align 4
   %inc9.us = add nuw i32 %i.024.us, 1
   %exitcond26 = icmp eq i32 %inc9.us, %N
   br i1 %exitcond26, label %for.cond.cleanup, label %for.cond1.preheader.us
@@ -211,10 +203,10 @@ for.cond.cleanup:                                 ; preds = %middle.block, %entr
 
 
 ; Function Attrs: argmemonly nounwind readonly willreturn
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) #0
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>) #0
 
 ; Function Attrs: argmemonly nounwind readonly willreturn
-declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #0
+declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32 immarg, <4 x i1>, <4 x i16>) #0
 
 ; Function Attrs: nounwind readnone willreturn
 declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) #1

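Note the intrinsic renaming that accompanies the pointer-type change: overloaded intrinsics mangle their pointer parameters into the name, so the typed-pointer suffix that spelled out the pointee vector type collapses to a bare address-space marker. Taking the declarations above as the example:

  ; Typed-pointer mangling encodes the pointee type:
  declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)

  ; Opaque-pointer mangling keeps only the address space:
  declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32 immarg, <4 x i1>, <4 x i16>)
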
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-le-simple.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-le-simple.ll
index 78ddb35e756e3..8e8934b6e9599 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-le-simple.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-le-simple.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main %s -o - | FileCheck %s
 
-define void @cbz_exit(i32* %in, i32* %res) {
+define void @cbz_exit(ptr %in, ptr %res) {
 ; CHECK-LABEL: cbz_exit:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    subs r2, r0, #4
@@ -20,18 +20,18 @@ entry:
 
 loop:
   %offset = phi i32 [ 0, %entry ], [ %next, %loop ]
-  %ptr = getelementptr i32, i32* %in, i32 %offset
-  %val = load i32, i32* %ptr
+  %ptr = getelementptr i32, ptr %in, i32 %offset
+  %val = load i32, ptr %ptr
   %next = add i32 %offset, 1
   %cmp = icmp eq i32 %val, 0
   br i1 %cmp, label %exit, label %loop
 
 exit:
-  store i32 %offset, i32* %res
+  store i32 %offset, ptr %res
   ret void
 }
 
-define void @cbnz_exit(i32* %in, i32* %res) {
+define void @cbnz_exit(ptr %in, ptr %res) {
 ; CHECK-LABEL: cbnz_exit:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    subs r2, r0, #4
@@ -50,18 +50,18 @@ entry:
 
 loop:
   %offset = phi i32 [ 0, %entry ], [ %next, %loop ]
-  %ptr = getelementptr i32, i32* %in, i32 %offset
-  %val = load i32, i32* %ptr
+  %ptr = getelementptr i32, ptr %in, i32 %offset
+  %val = load i32, ptr %ptr
   %next = add i32 %offset, 1
   %cmp = icmp ne i32 %val, 0
   br i1 %cmp, label %exit, label %loop
 
 exit:
-  store i32 %offset, i32* %res
+  store i32 %offset, ptr %res
   ret void
 }
 
-define void @cbnz_exit_too_large(i32* %in, i32* %res) {
+define void @cbnz_exit_too_large(ptr %in, ptr %res) {
 ; CHECK-LABEL: cbnz_exit_too_large:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    subs r2, r0, #4
@@ -81,19 +81,19 @@ entry:
 
 loop:
   %offset = phi i32 [ 0, %entry ], [ %next, %loop ]
-  %ptr = getelementptr i32, i32* %in, i32 %offset
-  %val = load i32, i32* %ptr
+  %ptr = getelementptr i32, ptr %in, i32 %offset
+  %val = load i32, ptr %ptr
   %next = add i32 %offset, 1
   %cmp = icmp ne i32 %val, 0
   %size = call i32 @llvm.arm.space(i32 4090, i32 undef)
   br i1 %cmp, label %exit, label %loop
 
 exit:
-  store i32 %offset, i32* %res
+  store i32 %offset, ptr %res
   ret void
 }
 
-define void @cbz_exit_minsize(i32* %in, i32* %res) #0 {
+define void @cbz_exit_minsize(ptr %in, ptr %res) #0 {
 ; CHECK-LABEL: cbz_exit_minsize:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    movs r2, #0
@@ -112,18 +112,18 @@ entry:
 
 loop:
   %offset = phi i32 [ 0, %entry ], [ %next, %loop ]
-  %ptr = getelementptr i32, i32* %in, i32 %offset
-  %val = load i32, i32* %ptr
+  %ptr = getelementptr i32, ptr %in, i32 %offset
+  %val = load i32, ptr %ptr
   %next = add i32 %offset, 1
   %cmp = icmp eq i32 %val, 0
   br i1 %cmp, label %exit, label %loop
 
 exit:
-  store i32 %offset, i32* %res
+  store i32 %offset, ptr %res
   ret void
 }
 
-define void @cbnz_exit_minsize(i32* %in, i32* %res) #0 {
+define void @cbnz_exit_minsize(ptr %in, ptr %res) #0 {
 ; CHECK-LABEL: cbnz_exit_minsize:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    movs r2, #0
@@ -142,14 +142,14 @@ entry:
 
 loop:
   %offset = phi i32 [ 0, %entry ], [ %next, %loop ]
-  %ptr = getelementptr i32, i32* %in, i32 %offset
-  %val = load i32, i32* %ptr
+  %ptr = getelementptr i32, ptr %in, i32 %offset
+  %val = load i32, ptr %ptr
   %next = add i32 %offset, 1
   %cmp = icmp ne i32 %val, 0
   br i1 %cmp, label %exit, label %loop
 
 exit:
-  store i32 %offset, i32* %res
+  store i32 %offset, ptr %res
   ret void
 }
 

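Where a test only touches scalar memory, as in the functions above, the conversion is a pure respelling: getelementptr keeps its explicit source element type as the first operand, so only the pointer operand's type changes and the generated code (and hence the CHECK lines) is unaffected. From the diff above:

  ; Typed:
    %ptr = getelementptr i32, i32* %in, i32 %offset
    %val = load i32, i32* %ptr

  ; Opaque:
    %ptr = getelementptr i32, ptr %in, i32 %offset
    %val = load i32, ptr %ptr
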
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/remat-vctp.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/remat-vctp.ll
index 1619ccb714a98..31fcf015b01a1 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/remat-vctp.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/remat-vctp.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -enable-arm-maskedgatscat=false %s -o - | FileCheck %s
 
-define void @remat_vctp(i32* %arg, i32* %arg1, i32* %arg2, i32* %arg3, i32* %arg4, i16 zeroext %arg5) {
+define void @remat_vctp(ptr %arg, ptr %arg1, ptr %arg2, ptr %arg3, ptr %arg4, i16 zeroext %arg5) {
 ; CHECK-LABEL: remat_vctp:
 ; CHECK:       @ %bb.0: @ %bb
 ; CHECK-NEXT:    push {r4, r5, r7, lr}
@@ -44,16 +44,14 @@ bb:
   br label %bb6
 
 bb6:                                              ; preds = %bb6, %bb
-  %i7 = phi i32* [ %arg3, %bb ], [ %i38, %bb6 ]
+  %i7 = phi ptr [ %arg3, %bb ], [ %i38, %bb6 ]
   %i8 = phi i32 [ %i, %bb ], [ %i42, %bb6 ]
-  %i9 = phi i32* [ %arg2, %bb ], [ %i41, %bb6 ]
-  %i10 = phi i32* [ %arg1, %bb ], [ %i40, %bb6 ]
-  %i11 = phi i32* [ %arg, %bb ], [ %i39, %bb6 ]
+  %i9 = phi ptr [ %arg2, %bb ], [ %i41, %bb6 ]
+  %i10 = phi ptr [ %arg1, %bb ], [ %i40, %bb6 ]
+  %i11 = phi ptr [ %arg, %bb ], [ %i39, %bb6 ]
   %i12 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i8)
-  %i13 = bitcast i32* %i11 to <4 x i32>*
-  %i14 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %i13, i32 4, <4 x i1> %i12, <4 x i32> zeroinitializer)
-  %i15 = bitcast i32* %i10 to <4 x i32>*
-  %i16 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %i15, i32 4, <4 x i1> %i12, <4 x i32> zeroinitializer)
+  %i14 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %i11, i32 4, <4 x i1> %i12, <4 x i32> zeroinitializer)
+  %i16 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %i10, i32 4, <4 x i1> %i12, <4 x i32> zeroinitializer)
   %i17 = icmp slt <4 x i32> %i16, zeroinitializer
   %i18 = sub <4 x i32> zeroinitializer, %i16
   %i19 = select <4 x i1> %i17, <4 x i32> %i18, <4 x i32> %i16
@@ -62,7 +60,7 @@ bb6:                                              ; preds = %bb6, %bb
   %i22 = add <4 x i32> %i20, <i32 1, i32 1, i32 1, i32 1>
   %i23 = lshr <4 x i32> %i21, <i32 24, i32 24, i32 24, i32 24>
   %i24 = and <4 x i32> %i23, <i32 63, i32 63, i32 63, i32 63>
-  %i25 = tail call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* %arg4, <4 x i32> %i24, i32 32, i32 2, i32 0)
+  %i25 = tail call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr %arg4, <4 x i32> %i24, i32 32, i32 2, i32 0)
   %i26 = tail call <4 x i32> @llvm.arm.mve.vqrdmulh.v4i32(<4 x i32> %i25, <4 x i32> %i21)
   %i27 = tail call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>, <4 x i32> %i26)
   %i28 = tail call <4 x i32> @llvm.arm.mve.vqrdmulh.v4i32(<4 x i32> %i25, <4 x i32> %i27)
@@ -73,14 +71,12 @@ bb6:                                              ; preds = %bb6, %bb
   %i33 = tail call <4 x i32> @llvm.arm.mve.vqshl.imm.v4i32(<4 x i32> %i32, i32 1, i32 0)
   %i34 = tail call <4 x i32> @llvm.arm.mve.neg.predicated.v4i32.v4i1(<4 x i32> %i33, <4 x i1> %i17, <4 x i32> %i33)
   %i35 = tail call <4 x i32> @llvm.arm.mve.vqrdmulh.v4i32(<4 x i32> %i14, <4 x i32> %i34)
-  %i36 = bitcast i32* %i9 to <4 x i32>*
-  %i37 = bitcast i32* %i7 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %i35, <4 x i32>* %i36, i32 4, <4 x i1> %i12)
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %i22, <4 x i32>* %i37, i32 4, <4 x i1> %i12)
-  %i38 = getelementptr inbounds i32, i32* %i7, i32 4
-  %i39 = getelementptr inbounds i32, i32* %i11, i32 4
-  %i40 = getelementptr inbounds i32, i32* %i10, i32 4
-  %i41 = getelementptr inbounds i32, i32* %i9, i32 4
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %i35, ptr %i9, i32 4, <4 x i1> %i12)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %i22, ptr %i7, i32 4, <4 x i1> %i12)
+  %i38 = getelementptr inbounds i32, ptr %i7, i32 4
+  %i39 = getelementptr inbounds i32, ptr %i11, i32 4
+  %i40 = getelementptr inbounds i32, ptr %i10, i32 4
+  %i41 = getelementptr inbounds i32, ptr %i9, i32 4
   %i42 = add nsw i32 %i8, -4
   %i43 = icmp sgt i32 %i8, 4
   br i1 %i43, label %bb6, label %bb44
@@ -89,7 +85,7 @@ bb44:                                             ; preds = %bb6
   ret void
 }
 
-define void @dont_remat_predicated_vctp(i32* %arg, i32* %arg1, i32* %arg2, i32* %arg3, i32* %arg4, i16 zeroext %arg5, i32 %conv.mask) {
+define void @dont_remat_predicated_vctp(ptr %arg, ptr %arg1, ptr %arg2, ptr %arg3, ptr %arg4, i16 zeroext %arg5, i32 %conv.mask) {
 ; CHECK-LABEL: dont_remat_predicated_vctp:
 ; CHECK:       @ %bb.0: @ %bb
 ; CHECK-NEXT:    push {r4, r5, r6, lr}
@@ -148,18 +144,16 @@ bb:
   br label %bb6
 
 bb6:                                              ; preds = %bb6, %bb
-  %i7 = phi i32* [ %arg3, %bb ], [ %i38, %bb6 ]
+  %i7 = phi ptr [ %arg3, %bb ], [ %i38, %bb6 ]
   %i8 = phi i32 [ %i, %bb ], [ %i42, %bb6 ]
-  %i9 = phi i32* [ %arg2, %bb ], [ %i41, %bb6 ]
-  %i10 = phi i32* [ %arg1, %bb ], [ %i40, %bb6 ]
-  %i11 = phi i32* [ %arg, %bb ], [ %i39, %bb6 ]
+  %i9 = phi ptr [ %arg2, %bb ], [ %i41, %bb6 ]
+  %i10 = phi ptr [ %arg1, %bb ], [ %i40, %bb6 ]
+  %i11 = phi ptr [ %arg, %bb ], [ %i39, %bb6 ]
   %i12 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 4)
   %mask = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i8)
   %pred = and <4 x i1> %i12, %mask
-  %i13 = bitcast i32* %i11 to <4 x i32>*
-  %i14 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %i13, i32 4, <4 x i1> %pred, <4 x i32> zeroinitializer)
-  %i15 = bitcast i32* %i10 to <4 x i32>*
-  %i16 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %i15, i32 4, <4 x i1> %pred, <4 x i32> zeroinitializer)
+  %i14 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %i11, i32 4, <4 x i1> %pred, <4 x i32> zeroinitializer)
+  %i16 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %i10, i32 4, <4 x i1> %pred, <4 x i32> zeroinitializer)
   %i17 = icmp slt <4 x i32> %i16, zeroinitializer
   %i18 = sub <4 x i32> zeroinitializer, %i16
   %i19 = select <4 x i1> %i17, <4 x i32> %i18, <4 x i32> %i16
@@ -168,7 +162,7 @@ bb6:                                              ; preds = %bb6, %bb
   %i22 = add <4 x i32> %i20, <i32 1, i32 1, i32 1, i32 1>
   %i23 = lshr <4 x i32> %i21, <i32 24, i32 24, i32 24, i32 24>
   %i24 = and <4 x i32> %i23, <i32 63, i32 63, i32 63, i32 63>
-  %i25 = tail call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* %arg4, <4 x i32> %i24, i32 32, i32 2, i32 0)
+  %i25 = tail call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr %arg4, <4 x i32> %i24, i32 32, i32 2, i32 0)
   %i26 = tail call <4 x i32> @llvm.arm.mve.vqrdmulh.v4i32(<4 x i32> %i25, <4 x i32> %i21)
   %i27 = tail call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>, <4 x i32> %i26)
   %i28 = tail call <4 x i32> @llvm.arm.mve.vqrdmulh.v4i32(<4 x i32> %i25, <4 x i32> %i27)
@@ -179,14 +173,12 @@ bb6:                                              ; preds = %bb6, %bb
   %i33 = tail call <4 x i32> @llvm.arm.mve.vqshl.imm.v4i32(<4 x i32> %i32, i32 1, i32 0)
   %i34 = tail call <4 x i32> @llvm.arm.mve.neg.predicated.v4i32.v4i1(<4 x i32> %i33, <4 x i1> %i17, <4 x i32> %i33)
   %i35 = tail call <4 x i32> @llvm.arm.mve.vqrdmulh.v4i32(<4 x i32> %i14, <4 x i32> %i34)
-  %i36 = bitcast i32* %i9 to <4 x i32>*
-  %i37 = bitcast i32* %i7 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %i35, <4 x i32>* %i36, i32 4, <4 x i1> %pred)
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %i22, <4 x i32>* %i37, i32 4, <4 x i1> %pred)
-  %i38 = getelementptr inbounds i32, i32* %i7, i32 4
-  %i39 = getelementptr inbounds i32, i32* %i11, i32 4
-  %i40 = getelementptr inbounds i32, i32* %i10, i32 4
-  %i41 = getelementptr inbounds i32, i32* %i9, i32 4
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %i35, ptr %i9, i32 4, <4 x i1> %pred)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %i22, ptr %i7, i32 4, <4 x i1> %pred)
+  %i38 = getelementptr inbounds i32, ptr %i7, i32 4
+  %i39 = getelementptr inbounds i32, ptr %i11, i32 4
+  %i40 = getelementptr inbounds i32, ptr %i10, i32 4
+  %i41 = getelementptr inbounds i32, ptr %i9, i32 4
   %i42 = add nsw i32 %i8, -4
   %i43 = icmp sgt i32 %i8, 4
   br i1 %i43, label %bb6, label %bb44
@@ -197,11 +189,11 @@ bb44:                                             ; preds = %bb6
 
 declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
 declare <4 x i1> @llvm.arm.mve.vctp32(i32)
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
 declare <4 x i32> @llvm.arm.mve.vqrdmulh.v4i32(<4 x i32>, <4 x i32>)
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
 declare <4 x i32> @llvm.arm.mve.vcls.v4i32(<4 x i32>)
-declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32*, <4 x i32>, i32, i32, i32)
+declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr, <4 x i32>, i32, i32, i32)
 declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
 declare <4 x i32> @llvm.arm.mve.vqshl.imm.v4i32(<4 x i32>, i32, i32)
 declare <4 x i32> @llvm.arm.mve.neg.predicated.v4i32.v4i1(<4 x i32>, <4 x i1>, <4 x i32>)

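The same mangling rule applies to the target-specific MVE gather intrinsic used in this file: the p0i32 component, which named the scalar pointee, becomes p0. Schematically, matching the declarations above:

  ; Typed pointers:
  declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32*, <4 x i32>, i32, i32, i32)

  ; Opaque pointers:
  declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr, <4 x i32>, i32, i32, i32)
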
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/sibling-loops.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/sibling-loops.ll
index 4e72918f63f62..caf7a339805fc 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/sibling-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/sibling-loops.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+lob --verify-machineinstrs %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc void @test(i16* noalias nocapture readonly %off, i16* noalias nocapture %data, i16* noalias nocapture %dst, i32 %n) {
+define arm_aapcs_vfpcc void @test(ptr noalias nocapture readonly %off, ptr noalias nocapture %data, ptr noalias nocapture %dst, i32 %n) {
 ; CHECK-LABEL: test:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
@@ -65,28 +65,28 @@ for.cond.cleanup14.us: ; preds = %for.body15.us
 
 for.body15.us: ; preds = %for.body4.us, %for.body15.us
   %j10.055.us = phi i32 [ %inc26.us, %for.body15.us ], [ 0, %for.body4.us ]
-  %arrayidx16.us = getelementptr inbounds i16, i16* %off, i32 %j10.055.us
-  %0 = load i16, i16* %arrayidx16.us, align 2
-  %arrayidx18.us = getelementptr inbounds i16, i16* %data, i32 %j10.055.us
-  %1 = load i16, i16* %arrayidx18.us, align 2
+  %arrayidx16.us = getelementptr inbounds i16, ptr %off, i32 %j10.055.us
+  %0 = load i16, ptr %arrayidx16.us, align 2
+  %arrayidx18.us = getelementptr inbounds i16, ptr %data, i32 %j10.055.us
+  %1 = load i16, ptr %arrayidx18.us, align 2
   %add20.us = add i16 %1, %0
   %add23.us = add i32 %j10.055.us, %mul.us
-  %arrayidx24.us = getelementptr inbounds i16, i16* %dst, i32 %add23.us
-  store i16 %add20.us, i16* %arrayidx24.us, align 2
+  %arrayidx24.us = getelementptr inbounds i16, ptr %dst, i32 %add23.us
+  store i16 %add20.us, ptr %arrayidx24.us, align 2
   %inc26.us = add nuw nsw i32 %j10.055.us, 1
   %exitcond93 = icmp eq i32 %inc26.us, %n
   br i1 %exitcond93, label %for.cond.cleanup14.us, label %for.body15.us
 
 for.body4.us: ; preds = %for.body4.us, %for.cond1.preheader.us
   %j.053.us = phi i32 [ 0, %for.cond1.preheader.us ], [ %inc.us, %for.body4.us ]
-  %arrayidx.us = getelementptr inbounds i16, i16* %off, i32 %j.053.us
-  %2 = load i16, i16* %arrayidx.us, align 2
-  %arrayidx5.us = getelementptr inbounds i16, i16* %data, i32 %j.053.us
-  %3 = load i16, i16* %arrayidx5.us, align 2
+  %arrayidx.us = getelementptr inbounds i16, ptr %off, i32 %j.053.us
+  %2 = load i16, ptr %arrayidx.us, align 2
+  %arrayidx5.us = getelementptr inbounds i16, ptr %data, i32 %j.053.us
+  %3 = load i16, ptr %arrayidx5.us, align 2
   %add.us = add i16 %3, %2
   %add8.us = add i32 %j.053.us, %mul.us
-  %arrayidx9.us = getelementptr inbounds i16, i16* %data, i32 %add8.us
-  store i16 %add.us, i16* %arrayidx9.us, align 2
+  %arrayidx9.us = getelementptr inbounds i16, ptr %data, i32 %add8.us
+  store i16 %add.us, ptr %arrayidx9.us, align 2
   %inc.us = add nuw nsw i32 %j.053.us, 1
   %exitcond = icmp eq i32 %inc.us, %n
   br i1 %exitcond, label %for.body15.us, label %for.body4.us

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-basic.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-basic.ll
index d10cbffe2dd24..c2b4494d148a9 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-basic.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-basic.ll
@@ -7,10 +7,10 @@
 ; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[REMAINING:%[^ ]+]], %vector.body ]
 ; CHECK: [[VCTP:%[^ ]+]] = call <16 x i1> @llvm.arm.mve.vctp8(i32 [[ELEMS]])
 ; CHECK: [[REMAINING]] = sub i32 [[ELEMS]], 16
-; CHECK: [[LD0:%[^ ]+]] = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* {{.*}}, i32 4, <16 x i1> [[VCTP]], <16 x i8> undef)
-; CHECK: [[LD1:%[^ ]+]] = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* {{.*}}, i32 4, <16 x i1> [[VCTP]], <16 x i8> undef)
-; CHECK: tail call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> {{.*}}, <16 x i8>* {{.*}}, i32 4, <16 x i1> [[VCTP]])
-define dso_local arm_aapcs_vfpcc void @mul_v16i8(i8* noalias nocapture readonly %a, i8* noalias nocapture readonly %b, i8* noalias nocapture %c, i32 %N) {
+; CHECK: [[LD0:%[^ ]+]] = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr {{.*}}, i32 4, <16 x i1> [[VCTP]], <16 x i8> undef)
+; CHECK: [[LD1:%[^ ]+]] = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr {{.*}}, i32 4, <16 x i1> [[VCTP]], <16 x i8> undef)
+; CHECK: tail call void @llvm.masked.store.v16i8.p0(<16 x i8> {{.*}}, ptr {{.*}}, i32 4, <16 x i1> [[VCTP]])
+define dso_local arm_aapcs_vfpcc void @mul_v16i8(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 15
@@ -28,17 +28,14 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %tmp14 = phi i32 [ %start, %vector.ph ], [ %tmp15, %vector.body ]
-  %tmp = getelementptr inbounds i8, i8* %a, i32 %index
+  %tmp = getelementptr inbounds i8, ptr %a, i32 %index
   %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %N)
-  %tmp2 = bitcast i8* %tmp to <16 x i8>*
-  %wide.masked.load = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp2, i32 4, <16 x i1> %active.lane.mask, <16 x i8> undef)
-  %tmp3 = getelementptr inbounds i8, i8* %b, i32 %index
-  %tmp4 = bitcast i8* %tmp3 to <16 x i8>*
-  %wide.masked.load2 = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp4, i32 4, <16 x i1> %active.lane.mask, <16 x i8> undef)
+  %wide.masked.load = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %tmp, i32 4, <16 x i1> %active.lane.mask, <16 x i8> undef)
+  %tmp3 = getelementptr inbounds i8, ptr %b, i32 %index
+  %wide.masked.load2 = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %tmp3, i32 4, <16 x i1> %active.lane.mask, <16 x i8> undef)
   %mul = mul nsw <16 x i8> %wide.masked.load2, %wide.masked.load
-  %tmp6 = getelementptr inbounds i8, i8* %c, i32 %index
-  %tmp7 = bitcast i8* %tmp6 to <16 x i8>*
-  tail call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %mul, <16 x i8>* %tmp7, i32 4, <16 x i1> %active.lane.mask)
+  %tmp6 = getelementptr inbounds i8, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v16i8.p0(<16 x i8> %mul, ptr %tmp6, i32 4, <16 x i1> %active.lane.mask)
   %index.next = add i32 %index, 16
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -55,10 +52,10 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 ; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[REMAINING:%[^ ]+]], %vector.body ]
 ; CHECK: [[VCTP:%[^ ]+]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[ELEMS]])
 ; CHECK: [[REMAINING]] = sub i32 [[ELEMS]], 8
-; CHECK: [[LD0:%[^ ]+]] = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* {{.*}}, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef)
-; CHECK: [[LD1:%[^ ]+]] = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* {{.*}}, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef)
-; CHECK: tail call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> {{.*}}, <8 x i16>* {{.*}}, i32 4, <8 x i1> [[VCTP]])
-define dso_local arm_aapcs_vfpcc void @mul_v8i16(i16* noalias nocapture readonly %a, i16* noalias nocapture readonly %b, i16* noalias nocapture %c, i32 %N) {
+; CHECK: [[LD0:%[^ ]+]] = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr {{.*}}, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef)
+; CHECK: [[LD1:%[^ ]+]] = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr {{.*}}, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef)
+; CHECK: tail call void @llvm.masked.store.v8i16.p0(<8 x i16> {{.*}}, ptr {{.*}}, i32 4, <8 x i1> [[VCTP]])
+define dso_local arm_aapcs_vfpcc void @mul_v8i16(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 7
@@ -76,17 +73,14 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %tmp14 = phi i32 [ %start, %vector.ph ], [ %tmp15, %vector.body ]
-  %tmp = getelementptr inbounds i16, i16* %a, i32 %index
+  %tmp = getelementptr inbounds i16, ptr %a, i32 %index
   %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
-  %tmp2 = bitcast i16* %tmp to <8 x i16>*
-  %wide.masked.load = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp2, i32 4, <8 x i1> %active.lane.mask, <8 x i16> undef)
-  %tmp3 = getelementptr inbounds i16, i16* %b, i32 %index
-  %tmp4 = bitcast i16* %tmp3 to <8 x i16>*
-  %wide.masked.load2 = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp4, i32 4, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %wide.masked.load = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %tmp, i32 4, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %tmp3 = getelementptr inbounds i16, ptr %b, i32 %index
+  %wide.masked.load2 = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %tmp3, i32 4, <8 x i1> %active.lane.mask, <8 x i16> undef)
   %mul = mul nsw <8 x i16> %wide.masked.load2, %wide.masked.load
-  %tmp6 = getelementptr inbounds i16, i16* %c, i32 %index
-  %tmp7 = bitcast i16* %tmp6 to <8 x i16>*
-  tail call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %mul, <8 x i16>* %tmp7, i32 4, <8 x i1> %active.lane.mask)
+  %tmp6 = getelementptr inbounds i16, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v8i16.p0(<8 x i16> %mul, ptr %tmp6, i32 4, <8 x i1> %active.lane.mask)
   %index.next = add i32 %index, 8
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -102,10 +96,10 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 ; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[REMAINING:%[^ ]+]], %vector.body ]
 ; CHECK: [[VCTP:%[^ ]+]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[ELEMS]])
 ; CHECK: [[REMAINING]] = sub i32 [[ELEMS]], 4
-; CHECK: [[LD0:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
-; CHECK: [[LD1:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
-; CHECK: tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> {{.*}}, <4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]])
-define dso_local arm_aapcs_vfpcc void @mul_v4i32(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+; CHECK: [[LD0:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
+; CHECK: [[LD1:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
+; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> [[VCTP]])
+define dso_local arm_aapcs_vfpcc void @mul_v4i32(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -123,17 +117,14 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %tmp14 = phi i32 [ %start, %vector.ph ], [ %tmp15, %vector.body ]
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %mul = mul nsw <4 x i32> %wide.masked.load2, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %mul, <4 x i32>* %tmp7, i32 4, <4 x i1> %active.lane.mask)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %mul, ptr %tmp6, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -150,10 +141,10 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 ; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[REMAINING:%[^ ]+]], %vector.body ]
 ; CHECK: [[VCTP:%[^ ]+]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[ELEMS]])
 ; CHECK: [[REMAINING]] = sub i32 [[ELEMS]], 4
-; CHECK: [[LD0:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
-; CHECK: [[LD1:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
-; CHECK: tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> {{.*}}, <4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]])
-define dso_local arm_aapcs_vfpcc void @split_vector(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+; CHECK: [[LD0:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
+; CHECK: [[LD1:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
+; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> [[VCTP]])
+define dso_local arm_aapcs_vfpcc void @split_vector(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -171,23 +162,20 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %tmp14 = phi i32 [ %start, %vector.ph ], [ %tmp15, %vector.body ]
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %extract.1.low = shufflevector <4 x i32> %wide.masked.load, <4 x i32> undef, < 2 x i32> < i32 0, i32 2>
   %extract.1.high = shufflevector <4 x i32> %wide.masked.load, <4 x i32> undef, < 2 x i32> < i32 1, i32 3>
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %extract.2.low = shufflevector <4 x i32> %wide.masked.load2, <4 x i32> undef, < 2 x i32> < i32 0, i32 2>
   %extract.2.high = shufflevector <4 x i32> %wide.masked.load2, <4 x i32> undef, < 2 x i32> < i32 1, i32 3>
   %mul = mul nsw <2 x i32> %extract.1.low, %extract.2.low
   %sub = sub nsw <2 x i32> %extract.1.high, %extract.2.high
   %combine = shufflevector <2 x i32> %mul, <2 x i32> %sub, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %combine, <4 x i32>* %tmp7, i32 4, <4 x i1> %active.lane.mask)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %combine, ptr %tmp6, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -202,10 +190,10 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 ; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[REMAINING:%[^ ]+]], %vector.body ]
 ; CHECK: [[VCTP:%[^ ]+]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[ELEMS]])
 ; CHECK: [[REMAINING]] = sub i32 [[ELEMS]], 4
-; CHECK: [[LD0:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
-; CHECK: [[LD1:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{.*}}, i32 4, <4 x i1> %wrong, <4 x i32> undef)
-; CHECK: tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> {{.*}}, <4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]])
-define dso_local arm_aapcs_vfpcc void @mismatch_load_pred(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+; CHECK: [[LD0:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
+; CHECK: [[LD1:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> %wrong, <4 x i32> undef)
+; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> [[VCTP]])
+define dso_local arm_aapcs_vfpcc void @mismatch_load_pred(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -229,18 +217,15 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
   %wrong = icmp ult <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %wrong, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %wrong, <4 x i32> undef)
   %tmp5 = mul nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp5, <4 x i32>* %tmp7, i32 4, <4 x i1> %active.lane.mask)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp5, ptr %tmp6, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -258,10 +243,10 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 ; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[REMAINING:%[^ ]+]], %vector.body ]
 ; CHECK: [[VCTP:%[^ ]+]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[ELEMS]])
 ; CHECK: [[REMAINING]] = sub i32 [[ELEMS]], 4
-; CHECK: [[LD0:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
-; CHECK: [[LD1:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
-; CHECK: tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> {{.*}}, <4 x i32>* {{.*}}, i32 4, <4 x i1> %wrong)
-define dso_local arm_aapcs_vfpcc void @mismatch_store_pred(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+; CHECK: [[LD0:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
+; CHECK: [[LD1:%[^ ]+]] = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]], <4 x i32> undef)
+; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> %wrong)
+define dso_local arm_aapcs_vfpcc void @mismatch_store_pred(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -285,18 +270,15 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
   %wrong = icmp ult <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %tmp5 = mul nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp5, <4 x i32>* %tmp7, i32 4, <4 x i1> %wrong)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp5, ptr %tmp6, i32 4, <4 x i1> %wrong)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -319,7 +301,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 ; CHECK:  %active.lane.mask{{.*}} = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %v8, i32 %N)
 ; CHECK:  %active.lane.mask{{.*}} = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %v9, i32 %N)
 ;
-define dso_local void @interleave4(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
+define dso_local void @interleave4(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
 entry:
   %cmp8 = icmp sgt i32 %N, 0
   %v0 = add i32 %N, 15
@@ -332,21 +314,18 @@ entry:
 
 
 vector.ph:
-  %scevgep = getelementptr i32, i32* %A, i32 8
-  %scevgep30 = getelementptr i32, i32* %C, i32 8
-  %scevgep37 = getelementptr i32, i32* %B, i32 8
+  %scevgep = getelementptr i32, ptr %A, i32 8
+  %scevgep30 = getelementptr i32, ptr %C, i32 8
+  %scevgep37 = getelementptr i32, ptr %B, i32 8
   %start = call i32 @llvm.start.loop.iterations.i32(i32 %v5)
   br label %vector.body
 
 vector.body:
-  %lsr.iv38 = phi i32* [ %scevgep39, %vector.body ], [ %scevgep37, %vector.ph ]
-  %lsr.iv31 = phi i32* [ %scevgep32, %vector.body ], [ %scevgep30, %vector.ph ]
-  %lsr.iv = phi i32* [ %scevgep25, %vector.body ], [ %scevgep, %vector.ph ]
+  %lsr.iv38 = phi ptr [ %scevgep39, %vector.body ], [ %scevgep37, %vector.ph ]
+  %lsr.iv31 = phi ptr [ %scevgep32, %vector.body ], [ %scevgep30, %vector.ph ]
+  %lsr.iv = phi ptr [ %scevgep25, %vector.body ], [ %scevgep, %vector.ph ]
   %index = phi i32 [ 0, %vector.ph ], [ %v14, %vector.body ]
   %v6 = phi i32 [ %start, %vector.ph ], [ %v15, %vector.body ]
-  %lsr.iv3840 = bitcast i32* %lsr.iv38 to <4 x i32>*
-  %lsr.iv3133 = bitcast i32* %lsr.iv31 to <4 x i32>*
-  %lsr.iv26 = bitcast i32* %lsr.iv to <4 x i32>*
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
   %v7 = add i32 %index, 4
   %active.lane.mask15 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %v7, i32 %N)
@@ -354,34 +333,34 @@ vector.body:
   %active.lane.mask16 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %v8, i32 %N)
   %v9 = add i32 %v8, 4
   %active.lane.mask17 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %v9, i32 %N)
-  %scevgep42 = getelementptr <4 x i32>, <4 x i32>* %lsr.iv3840, i32 -2
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %scevgep42, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
-  %scevgep43 = getelementptr <4 x i32>, <4 x i32>* %lsr.iv3840, i32 -1
-  %wide.masked.load18 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* nonnull %scevgep43, i32 4, <4 x i1> %active.lane.mask15, <4 x i32> undef)
-  %wide.masked.load19 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* nonnull %lsr.iv3840, i32 4, <4 x i1> %active.lane.mask16, <4 x i32> undef)
-  %scevgep41 = getelementptr <4 x i32>, <4 x i32>* %lsr.iv3840, i32 1
-  %wide.masked.load20 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* nonnull %scevgep41, i32 4, <4 x i1> %active.lane.mask17, <4 x i32> undef)
-  %scevgep34 = getelementptr <4 x i32>, <4 x i32>* %lsr.iv3133, i32 -2
-  %wide.masked.load21 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %scevgep34, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
-  %scevgep35 = getelementptr <4 x i32>, <4 x i32>* %lsr.iv3133, i32 -1
-  %wide.masked.load22 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* nonnull %scevgep35, i32 4, <4 x i1> %active.lane.mask15, <4 x i32> undef)
-  %wide.masked.load23 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* nonnull %lsr.iv3133, i32 4, <4 x i1> %active.lane.mask16, <4 x i32> undef)
-  %scevgep36 = getelementptr <4 x i32>, <4 x i32>* %lsr.iv3133, i32 1
-  %wide.masked.load24 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* nonnull %scevgep36, i32 4, <4 x i1> %active.lane.mask17, <4 x i32> undef)
+  %scevgep42 = getelementptr <4 x i32>, ptr %lsr.iv38, i32 -2
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %scevgep42, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %scevgep43 = getelementptr <4 x i32>, ptr %lsr.iv38, i32 -1
+  %wide.masked.load18 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr nonnull %scevgep43, i32 4, <4 x i1> %active.lane.mask15, <4 x i32> undef)
+  %wide.masked.load19 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr nonnull %lsr.iv38, i32 4, <4 x i1> %active.lane.mask16, <4 x i32> undef)
+  %scevgep41 = getelementptr <4 x i32>, ptr %lsr.iv38, i32 1
+  %wide.masked.load20 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr nonnull %scevgep41, i32 4, <4 x i1> %active.lane.mask17, <4 x i32> undef)
+  %scevgep34 = getelementptr <4 x i32>, ptr %lsr.iv31, i32 -2
+  %wide.masked.load21 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %scevgep34, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %scevgep35 = getelementptr <4 x i32>, ptr %lsr.iv31, i32 -1
+  %wide.masked.load22 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr nonnull %scevgep35, i32 4, <4 x i1> %active.lane.mask15, <4 x i32> undef)
+  %wide.masked.load23 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr nonnull %lsr.iv31, i32 4, <4 x i1> %active.lane.mask16, <4 x i32> undef)
+  %scevgep36 = getelementptr <4 x i32>, ptr %lsr.iv31, i32 1
+  %wide.masked.load24 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr nonnull %scevgep36, i32 4, <4 x i1> %active.lane.mask17, <4 x i32> undef)
   %v10 = add nsw <4 x i32> %wide.masked.load21, %wide.masked.load
   %v11 = add nsw <4 x i32> %wide.masked.load22, %wide.masked.load18
   %v12 = add nsw <4 x i32> %wide.masked.load23, %wide.masked.load19
   %v13 = add nsw <4 x i32> %wide.masked.load24, %wide.masked.load20
-  %scevgep27 = getelementptr <4 x i32>, <4 x i32>* %lsr.iv26, i32 -2
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v10, <4 x i32>* %scevgep27, i32 4, <4 x i1> %active.lane.mask)
-  %scevgep28 = getelementptr <4 x i32>, <4 x i32>* %lsr.iv26, i32 -1
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v11, <4 x i32>* %scevgep28, i32 4, <4 x i1> %active.lane.mask15)
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v12, <4 x i32>* %lsr.iv26, i32 4, <4 x i1> %active.lane.mask16)
-  %scevgep29 = getelementptr <4 x i32>, <4 x i32>* %lsr.iv26, i32 1
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v13, <4 x i32>* %scevgep29, i32 4, <4 x i1> %active.lane.mask17)
-  %scevgep25 = getelementptr i32, i32* %lsr.iv, i32 16
-  %scevgep32 = getelementptr i32, i32* %lsr.iv31, i32 16
-  %scevgep39 = getelementptr i32, i32* %lsr.iv38, i32 16
+  %scevgep27 = getelementptr <4 x i32>, ptr %lsr.iv, i32 -2
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %v10, ptr %scevgep27, i32 4, <4 x i1> %active.lane.mask)
+  %scevgep28 = getelementptr <4 x i32>, ptr %lsr.iv, i32 -1
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %v11, ptr %scevgep28, i32 4, <4 x i1> %active.lane.mask15)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %v12, ptr %lsr.iv, i32 4, <4 x i1> %active.lane.mask16)
+  %scevgep29 = getelementptr <4 x i32>, ptr %lsr.iv, i32 1
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %v13, ptr %scevgep29, i32 4, <4 x i1> %active.lane.mask17)
+  %scevgep25 = getelementptr i32, ptr %lsr.iv, i32 16
+  %scevgep32 = getelementptr i32, ptr %lsr.iv31, i32 16
+  %scevgep39 = getelementptr i32, ptr %lsr.iv38, i32 16
   %v14 = add i32 %v9, 4
   %v15 = call i32 @llvm.loop.decrement.reg.i32(i32 %v6, i32 1)
   %v16 = icmp ne i32 %v15, 0
@@ -396,7 +375,7 @@ for.cond.cleanup:
 ; CHECK-NOT:   vctp
 ; CHECK:       ret void
 ;
-define dso_local void @const_expected_in_set_loop(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
+define dso_local void @const_expected_in_set_loop(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
 entry:
   %cmp8 = icmp sgt i32 %N, 0
   %0 = add i32 %N, 3
@@ -412,23 +391,20 @@ vector.ph:
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
-  %lsr.iv17 = phi i32* [ %scevgep18, %vector.body ], [ %A, %vector.ph ]
-  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %C, %vector.ph ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %vector.ph ]
+  %lsr.iv17 = phi ptr [ %scevgep18, %vector.body ], [ %A, %vector.ph ]
+  %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %C, %vector.ph ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %vector.ph ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = phi i32 [ %start, %vector.ph ], [ %8, %vector.body ]
-  %lsr.iv13 = bitcast i32* %lsr.iv to <4 x i32>*
-  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
-  %lsr.iv1719 = bitcast i32* %lsr.iv17 to <4 x i32>*
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 42)
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv13, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
-  %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv14, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %7 = add nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %7, <4 x i32>* %lsr.iv1719, i32 4, <4 x i1> %active.lane.mask)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %7, ptr %lsr.iv17, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
-  %scevgep18 = getelementptr i32, i32* %lsr.iv17, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
+  %scevgep18 = getelementptr i32, ptr %lsr.iv17, i32 4
   %8 = call i32 @llvm.loop.decrement.reg.i32(i32 %6, i32 1)
   %9 = icmp ne i32 %8, 0
   br i1 %9, label %vector.body, label %for.cond.cleanup
@@ -442,7 +418,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 ; CHECK-NOT:   vctp
 ; CHECK:       ret void
 ;
-define dso_local void @tripcount_arg_not_invariant(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
+define dso_local void @tripcount_arg_not_invariant(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
 entry:
   %cmp8 = icmp sgt i32 %N, 0
   %0 = add i32 %N, 3
@@ -458,24 +434,21 @@ vector.ph:                                        ; preds = %entry
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
-  %lsr.iv17 = phi i32* [ %scevgep18, %vector.body ], [ %A, %vector.ph ]
-  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %C, %vector.ph ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %vector.ph ]
+  %lsr.iv17 = phi ptr [ %scevgep18, %vector.body ], [ %A, %vector.ph ]
+  %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %C, %vector.ph ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %vector.ph ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = phi i32 [ %start, %vector.ph ], [ %8, %vector.body ]
 
-  %lsr.iv13 = bitcast i32* %lsr.iv to <4 x i32>*
-  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
-  %lsr.iv1719 = bitcast i32* %lsr.iv17 to <4 x i32>*
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %index)
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv13, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
-  %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv14, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %7 = add nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %7, <4 x i32>* %lsr.iv1719, i32 4, <4 x i1> %active.lane.mask)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %7, ptr %lsr.iv17, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
-  %scevgep18 = getelementptr i32, i32* %lsr.iv17, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
+  %scevgep18 = getelementptr i32, ptr %lsr.iv17, i32 4
   %8 = call i32 @llvm.loop.decrement.reg.i32(i32 %6, i32 1)
   %9 = icmp ne i32 %8, 0
   ;br i1 %9, label %vector.body, label %for.cond.cleanup
@@ -490,7 +463,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 ; CHECK-NOT:   vctp
 ; CHECK:       ret void
 ;
-define dso_local void @addrec_base_not_zero(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
+define dso_local void @addrec_base_not_zero(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
 entry:
   %cmp8 = icmp sgt i32 %N, 0
   %0 = add i32 %N, 3
@@ -506,26 +479,23 @@ vector.ph:                                        ; preds = %entry
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
-  %lsr.iv17 = phi i32* [ %scevgep18, %vector.body ], [ %A, %vector.ph ]
-  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %C, %vector.ph ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %vector.ph ]
+  %lsr.iv17 = phi ptr [ %scevgep18, %vector.body ], [ %A, %vector.ph ]
+  %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %C, %vector.ph ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %vector.ph ]
 
 ; AddRec base is not 0:
   %index = phi i32 [ 1, %vector.ph ], [ %index.next, %vector.body ]
 
   %6 = phi i32 [ %start, %vector.ph ], [ %8, %vector.body ]
-  %lsr.iv13 = bitcast i32* %lsr.iv to <4 x i32>*
-  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
-  %lsr.iv1719 = bitcast i32* %lsr.iv17 to <4 x i32>*
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv13, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
-  %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv14, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %7 = add nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %7, <4 x i32>* %lsr.iv1719, i32 4, <4 x i1> %active.lane.mask)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %7, ptr %lsr.iv17, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
-  %scevgep18 = getelementptr i32, i32* %lsr.iv17, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
+  %scevgep18 = getelementptr i32, ptr %lsr.iv17, i32 4
   %8 = call i32 @llvm.loop.decrement.reg.i32(i32 %6, i32 1)
   %9 = icmp ne i32 %8, 0
   ;br i1 %9, label %vector.body, label %for.cond.cleanup
@@ -536,14 +506,14 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 
-declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>)
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32 immarg, <16 x i1>)
-declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-declare void @llvm.masked.store.v2i64.p0v2i64(<2 x i64>, <2 x i64>*, i32 immarg, <2 x i1>)
-declare <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>*, i32 immarg, <2 x i1>, <2 x i64>)
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32 immarg, <16 x i1>, <16 x i8>)
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32 immarg, <16 x i1>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>)
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v2i64.p0(<2 x i64>, ptr, i32 immarg, <2 x i1>)
+declare <2 x i64> @llvm.masked.load.v2i64.p0(ptr, i32 immarg, <2 x i1>, <2 x i64>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
 declare i32 @llvm.start.loop.iterations.i32(i32)
 declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)

diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-const.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-const.ll
index 1c173e9dfd1db..6342e288647c2 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-const.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-const.ll
@@ -1,29 +1,26 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -mtriple=thumbv8.1m.main -mve-tail-predication -tail-predication=enabled -mattr=+mve %s -S -o - | FileCheck %s
 
-define dso_local void @foo(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
+define dso_local void @foo(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, ptr noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 8001)
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:
-; CHECK-NEXT:    [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ]
-; CHECK-NEXT:    [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ]
+; CHECK-NEXT:    [[LSR_IV14:%.*]] = phi ptr [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[LSR_IV11:%.*]] = phi ptr [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ]
+; CHECK-NEXT:    [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ [[START]], [[ENTRY]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP1:%.*]] = phi i32 [ 32003, [[ENTRY]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>*
-; CHECK-NEXT:    [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>*
-; CHECK-NEXT:    [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>*
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP1]])
 ; CHECK-NEXT:    [[TMP3]] = sub i32 [[TMP1]], 4
-; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV10]], i32 4, <4 x i1> [[TMP2]], <4 x i32> undef)
-; CHECK-NEXT:    [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV1113]], i32 4, <4 x i1> [[TMP2]], <4 x i32> undef)
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV]], i32 4, <4 x i1> [[TMP2]], <4 x i32> undef)
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV11]], i32 4, <4 x i1> [[TMP2]], <4 x i32> undef)
 ; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD]]
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP4]], <4 x i32>* [[LSR_IV1416]], i32 4, <4 x i1> [[TMP2]])
-; CHECK-NEXT:    [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4
-; CHECK-NEXT:    [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4
-; CHECK-NEXT:    [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[TMP4]], ptr [[LSR_IV14]], i32 4, <4 x i1> [[TMP2]])
+; CHECK-NEXT:    [[SCEVGEP]] = getelementptr i32, ptr [[LSR_IV]], i32 4
+; CHECK-NEXT:    [[SCEVGEP12]] = getelementptr i32, ptr [[LSR_IV11]], i32 4
+; CHECK-NEXT:    [[SCEVGEP15]] = getelementptr i32, ptr [[LSR_IV14]], i32 4
 ; CHECK-NEXT:    [[TMP5]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP0]], i32 1)
 ; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[TMP6]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
@@ -35,23 +32,20 @@ entry:
   br label %vector.body
 
 vector.body:
-  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
-  %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
+  %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %A, %entry ]
+  %lsr.iv11 = phi ptr [ %scevgep12, %vector.body ], [ %C, %entry ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %entry ]
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = phi i32 [ %start, %entry ], [ %3, %vector.body ]
-  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
-  %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
-  %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 32003)
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef)
-  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv11, i32 4, <4 x i1> %1, <4 x i32> undef)
   %2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %lsr.iv14, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
-  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep12 = getelementptr i32, ptr %lsr.iv11, i32 4
+  %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
   %3 = call i32 @llvm.loop.decrement.reg.i32(i32 %0, i32 1)
   %4 = icmp ne i32 %3, 0
   br i1 %4, label %vector.body, label %for.cond.cleanup
@@ -63,26 +57,23 @@ for.cond.cleanup:
 ; Silly test case: the loop count is constant and a multiple of the vectorisation
 ; factor. So, the vectoriser should not produce masked loads/stores and there's
 ; nothing to tail-predicate here, just checking.
-define dso_local void @foo2(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
+define dso_local void @foo2(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, ptr noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
 ; CHECK-LABEL: @foo2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 2000)
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:
-; CHECK-NEXT:    [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ]
-; CHECK-NEXT:    [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ]
+; CHECK-NEXT:    [[LSR_IV14:%.*]] = phi ptr [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[LSR_IV11:%.*]] = phi ptr [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ]
+; CHECK-NEXT:    [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ [[START]], [[ENTRY]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>*
-; CHECK-NEXT:    [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>*
-; CHECK-NEXT:    [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>*
-; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[LSR_IV10]], align 4
-; CHECK-NEXT:    [[WIDE_LOAD9:%.*]] = load <4 x i32>, <4 x i32>* [[LSR_IV1113]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[LSR_IV]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD9:%.*]] = load <4 x i32>, ptr [[LSR_IV11]], align 4
 ; CHECK-NEXT:    [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD9]], [[WIDE_LOAD]]
-; CHECK-NEXT:    store <4 x i32> [[TMP1]], <4 x i32>* [[LSR_IV1416]], align 4
-; CHECK-NEXT:    [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4
-; CHECK-NEXT:    [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4
-; CHECK-NEXT:    [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4
+; CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr [[LSR_IV14]], align 4
+; CHECK-NEXT:    [[SCEVGEP]] = getelementptr i32, ptr [[LSR_IV]], i32 4
+; CHECK-NEXT:    [[SCEVGEP12]] = getelementptr i32, ptr [[LSR_IV11]], i32 4
+; CHECK-NEXT:    [[SCEVGEP15]] = getelementptr i32, ptr [[LSR_IV14]], i32 4
 ; CHECK-NEXT:    [[TMP2]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP0]], i32 1)
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
 ; CHECK-NEXT:    br i1 [[TMP3]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
@@ -94,20 +85,17 @@ entry:
   br label %vector.body
 
 vector.body:
-  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
-  %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
+  %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %A, %entry ]
+  %lsr.iv11 = phi ptr [ %scevgep12, %vector.body ], [ %C, %entry ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %entry ]
   %0 = phi i32 [ %start, %entry ], [ %2, %vector.body ]
-  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
-  %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
-  %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %lsr.iv10, align 4
-  %wide.load9 = load <4 x i32>, <4 x i32>* %lsr.iv1113, align 4
+  %wide.load = load <4 x i32>, ptr %lsr.iv, align 4
+  %wide.load9 = load <4 x i32>, ptr %lsr.iv11, align 4
   %1 = add nsw <4 x i32> %wide.load9, %wide.load
-  store <4 x i32> %1, <4 x i32>* %lsr.iv1416, align 4
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
-  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
+  store <4 x i32> %1, ptr %lsr.iv14, align 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep12 = getelementptr i32, ptr %lsr.iv11, i32 4
+  %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
   %2 = call i32 @llvm.loop.decrement.reg.i32(i32 %0, i32 1)
   %3 = icmp ne i32 %2, 0
   br i1 %3, label %vector.body, label %for.cond.cleanup
@@ -117,32 +105,29 @@ for.cond.cleanup:
 }
 
 ; Check that the icmp is a ult
-define dso_local void @foo3(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
+define dso_local void @foo3(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, ptr noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
 ; CHECK-LABEL: @foo3(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 8001)
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:
-; CHECK-NEXT:    [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ]
-; CHECK-NEXT:    [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ]
+; CHECK-NEXT:    [[LSR_IV14:%.*]] = phi ptr [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[LSR_IV11:%.*]] = phi ptr [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ]
+; CHECK-NEXT:    [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ]
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ [[START]], [[ENTRY]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>*
-; CHECK-NEXT:    [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>*
-; CHECK-NEXT:    [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>*
 ; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0
 ; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3>
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt <4 x i32> [[INDUCTION]], <i32 32002, i32 32002, i32 32002, i32 32002>
-; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV10]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
-; CHECK-NEXT:    [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV1113]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV11]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
 ; CHECK-NEXT:    [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD]]
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP2]], <4 x i32>* [[LSR_IV1416]], i32 4, <4 x i1> [[TMP1]])
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[TMP2]], ptr [[LSR_IV14]], i32 4, <4 x i1> [[TMP1]])
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
-; CHECK-NEXT:    [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4
-; CHECK-NEXT:    [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4
-; CHECK-NEXT:    [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4
+; CHECK-NEXT:    [[SCEVGEP]] = getelementptr i32, ptr [[LSR_IV]], i32 4
+; CHECK-NEXT:    [[SCEVGEP12]] = getelementptr i32, ptr [[LSR_IV11]], i32 4
+; CHECK-NEXT:    [[SCEVGEP15]] = getelementptr i32, ptr [[LSR_IV14]], i32 4
 ; CHECK-NEXT:    [[TMP3]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP0]], i32 1)
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
 ; CHECK-NEXT:    br i1 [[TMP4]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
@@ -154,14 +139,11 @@ entry:
   br label %vector.body
 
 vector.body:
-  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
-  %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
+  %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %A, %entry ]
+  %lsr.iv11 = phi ptr [ %scevgep12, %vector.body ], [ %C, %entry ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %entry ]
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = phi i32 [ %start, %entry ], [ %3, %vector.body ]
-  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
-  %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
-  %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
@@ -169,14 +151,14 @@ vector.body:
 ; UGT here:
   %1 = icmp ugt <4 x i32> %induction, <i32 32002, i32 32002, i32 32002, i32 32002>
 
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef)
-  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv11, i32 4, <4 x i1> %1, <4 x i32> undef)
   %2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %lsr.iv14, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
-  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep12 = getelementptr i32, ptr %lsr.iv11, i32 4
+  %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
   %3 = call i32 @llvm.loop.decrement.reg.i32(i32 %0, i32 1)
   %4 = icmp ne i32 %3, 0
   br i1 %4, label %vector.body, label %for.cond.cleanup
@@ -185,32 +167,29 @@ for.cond.cleanup:
   ret void
 }
 
-define dso_local void @foo5(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
+define dso_local void @foo5(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, ptr noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
 ; CHECK-LABEL: @foo5(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[START:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 8001)
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:
-; CHECK-NEXT:    [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ]
-; CHECK-NEXT:    [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ]
+; CHECK-NEXT:    [[LSR_IV14:%.*]] = phi ptr [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[LSR_IV11:%.*]] = phi ptr [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ]
+; CHECK-NEXT:    [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ]
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ [[START]], [[ENTRY]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>*
-; CHECK-NEXT:    [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>*
-; CHECK-NEXT:    [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>*
 ; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0
 ; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3>
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult <4 x i32> [[INDUCTION]], <i32 0, i32 3200, i32 32002, i32 32002>
-; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV10]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
-; CHECK-NEXT:    [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV1113]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[LSR_IV11]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
 ; CHECK-NEXT:    [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD]]
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP2]], <4 x i32>* [[LSR_IV1416]], i32 4, <4 x i1> [[TMP1]])
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[TMP2]], ptr [[LSR_IV14]], i32 4, <4 x i1> [[TMP1]])
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
-; CHECK-NEXT:    [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4
-; CHECK-NEXT:    [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4
-; CHECK-NEXT:    [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4
+; CHECK-NEXT:    [[SCEVGEP]] = getelementptr i32, ptr [[LSR_IV]], i32 4
+; CHECK-NEXT:    [[SCEVGEP12]] = getelementptr i32, ptr [[LSR_IV11]], i32 4
+; CHECK-NEXT:    [[SCEVGEP15]] = getelementptr i32, ptr [[LSR_IV14]], i32 4
 ; CHECK-NEXT:    [[TMP3]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP0]], i32 1)
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
 ; CHECK-NEXT:    br i1 [[TMP4]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
@@ -222,26 +201,23 @@ entry:
   br label %vector.body
 
 vector.body:
-  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
-  %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
+  %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %A, %entry ]
+  %lsr.iv11 = phi ptr [ %scevgep12, %vector.body ], [ %C, %entry ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %entry ]
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = phi i32 [ %start, %entry ], [ %3, %vector.body ]
-  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
-  %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
-  %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
   %1 = icmp ult <4 x i32> %induction, <i32 0, i32 3200, i32 32002, i32 32002>
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef)
-  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv11, i32 4, <4 x i1> %1, <4 x i32> undef)
   %2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %lsr.iv14, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
-  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep12 = getelementptr i32, ptr %lsr.iv11, i32 4
+  %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
   %3 = call i32 @llvm.loop.decrement.reg.i32(i32 %0, i32 1)
   %4 = icmp ne i32 %3, 0
   br i1 %4, label %vector.body, label %for.cond.cleanup
@@ -256,30 +232,27 @@ for.cond.cleanup:
 ; CHECK:       @llvm.get.active.lane.mask
 ; CHECK:       ret void
 ;
-define dso_local void @inconsistent_tripcounts(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
+define dso_local void @inconsistent_tripcounts(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, ptr noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 8001)
   br label %vector.body
 
 vector.body:
-  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
-  %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
+  %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %A, %entry ]
+  %lsr.iv11 = phi ptr [ %scevgep12, %vector.body ], [ %C, %entry ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %entry ]
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = phi i32 [ %start, %entry ], [ %3, %vector.body ]
-  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
-  %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
-  %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
 ; BTC = UINT_MAX, and scalar trip count BTC + 1 would overflow:
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 4294967295)
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef)
-  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv11, i32 4, <4 x i1> %1, <4 x i32> undef)
   %2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %lsr.iv14, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
-  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep12 = getelementptr i32, ptr %lsr.iv11, i32 4
+  %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
   %3 = call i32 @llvm.loop.decrement.reg.i32(i32 %0, i32 1)
   %4 = icmp ne i32 %3, 0
   br i1 %4, label %vector.body, label %for.cond.cleanup
@@ -294,29 +267,26 @@ for.cond.cleanup:
 ; CHECK:       @llvm.get.active.lane.mask
 ; CHECK:       ret void
 ;
-define dso_local void @overflow_in_sub(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
+define dso_local void @overflow_in_sub(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, ptr noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 1073741824)
   br label %vector.body
 
 vector.body:
-  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
-  %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
+  %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %A, %entry ]
+  %lsr.iv11 = phi ptr [ %scevgep12, %vector.body ], [ %C, %entry ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %entry ]
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = phi i32 [ %start, %entry ], [ %3, %vector.body ]
-  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
-  %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
-  %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 32003)
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef)
-  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv11, i32 4, <4 x i1> %1, <4 x i32> undef)
   %2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %lsr.iv14, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
-  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep12 = getelementptr i32, ptr %lsr.iv11, i32 4
+  %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
   %3 = call i32 @llvm.loop.decrement.reg.i32(i32 %0, i32 1)
   %4 = icmp ne i32 %3, 0
   br i1 %4, label %vector.body, label %for.cond.cleanup
@@ -332,30 +302,27 @@ for.cond.cleanup:
 ; CHECK:       @llvm.get.active.lane.mask
 ; CHECK:       ret void
 ;
-define dso_local void @IV_not_an_induction(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
+define dso_local void @IV_not_an_induction(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, ptr noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 8001)
   br label %vector.body
 
 vector.body:
-  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
-  %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
+  %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %A, %entry ]
+  %lsr.iv11 = phi ptr [ %scevgep12, %vector.body ], [ %C, %entry ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %entry ]
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = phi i32 [ %start, %entry ], [ %3, %vector.body ]
-  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
-  %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
-  %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
 ; The induction variable %N is not an IV:
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %N, i32 32003)
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef)
-  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv11, i32 4, <4 x i1> %1, <4 x i32> undef)
   %2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %lsr.iv14, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
-  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep12 = getelementptr i32, ptr %lsr.iv11, i32 4
+  %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
   %3 = call i32 @llvm.loop.decrement.reg.i32(i32 %0, i32 1)
   %4 = icmp ne i32 %3, 0
   br i1 %4, label %vector.body, label %for.cond.cleanup
@@ -370,33 +337,30 @@ for.cond.cleanup:
 ; CHECK:       @llvm.get.active.lane.mask
 ; CHECK:       ret void
 ;
-define dso_local void @IV_wrong_step(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
+define dso_local void @IV_wrong_step(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, ptr noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 8001)
   br label %vector.body
 
 vector.body:
-  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
-  %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
+  %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %A, %entry ]
+  %lsr.iv11 = phi ptr [ %scevgep12, %vector.body ], [ %C, %entry ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %entry ]
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = phi i32 [ %start, %entry ], [ %3, %vector.body ]
-  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
-  %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
-  %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 32003)
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef)
-  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv11, i32 4, <4 x i1> %1, <4 x i32> undef)
   %2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %lsr.iv14, i32 4, <4 x i1> %1)
 
 ; %index is incremented with 3 and not 4, which is the vectorisation factor
 ; that we expect here:
   %index.next = add i32 %index, 3
 
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
-  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep12 = getelementptr i32, ptr %lsr.iv11, i32 4
+  %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
   %3 = call i32 @llvm.loop.decrement.reg.i32(i32 %0, i32 1)
   %4 = icmp ne i32 %3, 0
   br i1 %4, label %vector.body, label %for.cond.cleanup
@@ -411,32 +375,29 @@ for.cond.cleanup:
 ; CHECK:       @llvm.get.active.lane.mask
 ; CHECK:       ret void
 ;
-define dso_local void @IV_step_not_constant(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
+define dso_local void @IV_step_not_constant(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, ptr noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 {
 entry:
   %start = call i32 @llvm.start.loop.iterations.i32(i32 8001)
   br label %vector.body
 
 vector.body:
-  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ]
-  %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ]
+  %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %A, %entry ]
+  %lsr.iv11 = phi ptr [ %scevgep12, %vector.body ], [ %C, %entry ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %entry ]
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = phi i32 [ %start, %entry ], [ %3, %vector.body ]
-  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
-  %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>*
-  %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>*
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 32003)
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef)
-  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv11, i32 4, <4 x i1> %1, <4 x i32> undef)
   %2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %lsr.iv14, i32 4, <4 x i1> %1)
 
 ; %index is incremented with some runtime value, i.e. not a constant:
   %index.next = add i32 %index, %N
 
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4
-  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep12 = getelementptr i32, ptr %lsr.iv11, i32 4
+  %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
   %3 = call i32 @llvm.loop.decrement.reg.i32(i32 %0, i32 1)
   %4 = icmp ne i32 %3, 0
   br i1 %4, label %vector.body, label %for.cond.cleanup
@@ -451,7 +412,7 @@ for.cond.cleanup:
 ; CHECK:       @llvm.get.active.lane.mask
 ; CHECK:       ret void
 ;
-define dso_local void @outerloop_phi(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
+define dso_local void @outerloop_phi(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
 entry:
   %cmp24 = icmp eq i32 %N, 0
   br i1 %cmp24, label %for.cond.cleanup, label %vector.ph.preheader
@@ -460,32 +421,29 @@ vector.ph.preheader:                              ; preds = %entry
   br label %vector.ph
 
 vector.ph:                                        ; preds = %vector.ph.preheader, %for.cond.cleanup3
-  %lsr.iv36 = phi i32* [ %B, %vector.ph.preheader ], [ %scevgep37, %for.cond.cleanup3 ]
-  %lsr.iv31 = phi i32* [ %C, %vector.ph.preheader ], [ %scevgep32, %for.cond.cleanup3 ]
-  %lsr.iv = phi i32* [ %A, %vector.ph.preheader ], [ %scevgep, %for.cond.cleanup3 ]
+  %lsr.iv36 = phi ptr [ %B, %vector.ph.preheader ], [ %scevgep37, %for.cond.cleanup3 ]
+  %lsr.iv31 = phi ptr [ %C, %vector.ph.preheader ], [ %scevgep32, %for.cond.cleanup3 ]
+  %lsr.iv = phi ptr [ %A, %vector.ph.preheader ], [ %scevgep, %for.cond.cleanup3 ]
   %j.025 = phi i32 [ %inc11, %for.cond.cleanup3 ], [ 0, %vector.ph.preheader ]
   %start = call i32 @llvm.start.loop.iterations.i32(i32 1025)
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
-  %lsr.iv38 = phi i32* [ %scevgep39, %vector.body ], [ %lsr.iv36, %vector.ph ]
-  %lsr.iv33 = phi i32* [ %scevgep34, %vector.body ], [ %lsr.iv31, %vector.ph ]
-  %lsr.iv28 = phi i32* [ %scevgep29, %vector.body ], [ %lsr.iv, %vector.ph ]
+  %lsr.iv38 = phi ptr [ %scevgep39, %vector.body ], [ %lsr.iv36, %vector.ph ]
+  %lsr.iv33 = phi ptr [ %scevgep34, %vector.body ], [ %lsr.iv31, %vector.ph ]
+  %lsr.iv28 = phi ptr [ %scevgep29, %vector.body ], [ %lsr.iv, %vector.ph ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %0 = phi i32 [ %start, %vector.ph ], [ %2, %vector.body ]
-  %lsr.iv3840 = bitcast i32* %lsr.iv38 to <4 x i32>*
-  %lsr.iv3335 = bitcast i32* %lsr.iv33 to <4 x i32>*
-  %lsr.iv2830 = bitcast i32* %lsr.iv28 to <4 x i32>*
 ; It's using %j.025, the induction variable from its outer loop:
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %j.025, i32 4096)
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv3840, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
-  %wide.masked.load27 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv3335, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv38, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load27 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv33, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %1 = add nsw <4 x i32> %wide.masked.load27, %wide.masked.load
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %lsr.iv2830, i32 4, <4 x i1> %active.lane.mask)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %1, ptr %lsr.iv28, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %scevgep29 = getelementptr i32, i32* %lsr.iv28, i32 4
-  %scevgep34 = getelementptr i32, i32* %lsr.iv33, i32 4
-  %scevgep39 = getelementptr i32, i32* %lsr.iv38, i32 4
+  %scevgep29 = getelementptr i32, ptr %lsr.iv28, i32 4
+  %scevgep34 = getelementptr i32, ptr %lsr.iv33, i32 4
+  %scevgep39 = getelementptr i32, ptr %lsr.iv38, i32 4
   %2 = call i32 @llvm.loop.decrement.reg.i32(i32 %0, i32 1)
   %3 = icmp ne i32 %2, 0
   br i1 %3, label %vector.body, label %for.cond.cleanup3
@@ -495,16 +453,16 @@ for.cond.cleanup:                                 ; preds = %for.cond.cleanup3,
 
 for.cond.cleanup3:                                ; preds = %vector.body
   %inc11 = add nuw i32 %j.025, 1
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 1
-  %scevgep32 = getelementptr i32, i32* %lsr.iv31, i32 1
-  %scevgep37 = getelementptr i32, i32* %lsr.iv36, i32 1
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 1
+  %scevgep32 = getelementptr i32, ptr %lsr.iv31, i32 1
+  %scevgep37 = getelementptr i32, ptr %lsr.iv36, i32 1
   %exitcond26 = icmp eq i32 %inc11, %N
   br i1 %exitcond26, label %for.cond.cleanup, label %vector.ph
 }
 
 
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) #1
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>) #2
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>) #1
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>) #2
 declare i32 @llvm.loop.decrement.reg.i32(i32 , i32 )
 declare i32 @llvm.start.loop.iterations.i32(i32)
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-disabled-in-loloops.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-disabled-in-loloops.ll
index 8dc18e5593a2b..a0e690212d5a4 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-disabled-in-loloops.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-disabled-in-loloops.ll
@@ -7,7 +7,7 @@
 ; RUN:     -arm-loloops-disable-tailpred %s -o - --verify-machineinstrs | \
 ; RUN:     FileCheck %s --check-prefix=DISABLED
 
-define dso_local void @check_option(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
+define dso_local void @check_option(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
 ; ENABLED-LABEL: check_option:
 ; ENABLED:       @ %bb.0: @ %entry
 ; ENABLED-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
@@ -86,23 +86,20 @@ vector.ph:                                        ; preds = %entry
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
-  %lsr.iv17 = phi i32* [ %scevgep18, %vector.body ], [ %A, %vector.ph ]
-  %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %C, %vector.ph ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %vector.ph ]
+  %lsr.iv17 = phi ptr [ %scevgep18, %vector.body ], [ %A, %vector.ph ]
+  %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %C, %vector.ph ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %vector.ph ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = phi i32 [ %start, %vector.ph ], [ %8, %vector.body ]
-  %lsr.iv13 = bitcast i32* %lsr.iv to <4 x i32>*
-  %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>*
-  %lsr.iv1719 = bitcast i32* %lsr.iv17 to <4 x i32>*
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv13, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
-  %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv14, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %7 = add nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %7, <4 x i32>* %lsr.iv1719, i32 4, <4 x i1> %active.lane.mask)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %7, ptr %lsr.iv17, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4
-  %scevgep18 = getelementptr i32, i32* %lsr.iv17, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
+  %scevgep18 = getelementptr i32, ptr %lsr.iv17, i32 4
   %8 = call i32 @llvm.loop.decrement.reg.i32(i32 %6, i32 1)
   %9 = icmp ne i32 %8, 0
   ;br i1 %9, label %vector.body, label %for.cond.cleanup
@@ -112,8 +109,8 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
 declare i32 @llvm.start.loop.iterations.i32(i32)
 declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-add-sat.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-add-sat.ll
index 14f1d0c002048..0ff05f28017b4 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-add-sat.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-add-sat.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs -tail-predication=enabled -o - %s | FileCheck %s
 
-define arm_aapcs_vfpcc void @uadd_sat(i16* noalias nocapture readonly %pSrcA, i16* noalias nocapture readonly %pSrcB, i16* noalias nocapture %pDst, i32 %blockSize) {
+define arm_aapcs_vfpcc void @uadd_sat(ptr noalias nocapture readonly %pSrcA, ptr noalias nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %blockSize) {
 ; CHECK-LABEL: uadd_sat:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -31,26 +31,23 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i16, i16* %pSrcA, i32 %index
-  %next.gep20 = getelementptr i16, i16* %pDst, i32 %index
-  %next.gep21 = getelementptr i16, i16* %pSrcB, i32 %index
+  %next.gep = getelementptr i16, ptr %pSrcA, i32 %index
+  %next.gep20 = getelementptr i16, ptr %pDst, i32 %index
+  %next.gep21 = getelementptr i16, ptr %pSrcB, i32 %index
   %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %blockSize)
-  %0 = bitcast i16* %next.gep to <8 x i16>*
-  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
-  %1 = bitcast i16* %next.gep21 to <8 x i16>*
-  %wide.masked.load24 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
-  %2 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %wide.masked.load, <8 x i16> %wide.masked.load24)
-  %3 = bitcast i16* %next.gep20 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %2, <8 x i16>* %3, i32 2, <8 x i1> %active.lane.mask)
+  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %next.gep, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %wide.masked.load24 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %next.gep21, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %0 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %wide.masked.load, <8 x i16> %wide.masked.load24)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %next.gep20, i32 2, <8 x i1> %active.lane.mask)
   %index.next = add i32 %index, 8
-  %4 = icmp eq i32 %index.next, %n.vec
-  br i1 %4, label %while.end, label %vector.body
+  %1 = icmp eq i32 %index.next, %n.vec
+  br i1 %1, label %while.end, label %vector.body
 
 while.end:                                        ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @sadd_sat(i16* noalias nocapture readonly %pSrcA, i16* noalias nocapture readonly %pSrcB, i16* noalias nocapture %pDst, i32 %blockSize) {
+define arm_aapcs_vfpcc void @sadd_sat(ptr noalias nocapture readonly %pSrcA, ptr noalias nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %blockSize) {
 ; CHECK-LABEL: sadd_sat:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -80,20 +77,17 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i16, i16* %pSrcA, i32 %index
-  %next.gep20 = getelementptr i16, i16* %pDst, i32 %index
-  %next.gep21 = getelementptr i16, i16* %pSrcB, i32 %index
+  %next.gep = getelementptr i16, ptr %pSrcA, i32 %index
+  %next.gep20 = getelementptr i16, ptr %pDst, i32 %index
+  %next.gep21 = getelementptr i16, ptr %pSrcB, i32 %index
   %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %blockSize)
-  %0 = bitcast i16* %next.gep to <8 x i16>*
-  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
-  %1 = bitcast i16* %next.gep21 to <8 x i16>*
-  %wide.masked.load24 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
-  %2 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %wide.masked.load, <8 x i16> %wide.masked.load24)
-  %3 = bitcast i16* %next.gep20 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %2, <8 x i16>* %3, i32 2, <8 x i1> %active.lane.mask)
+  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %next.gep, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %wide.masked.load24 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %next.gep21, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %0 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %wide.masked.load, <8 x i16> %wide.masked.load24)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %next.gep20, i32 2, <8 x i1> %active.lane.mask)
   %index.next = add i32 %index, 8
-  %4 = icmp eq i32 %index.next, %n.vec
-  br i1 %4, label %while.end, label %vector.body
+  %1 = icmp eq i32 %index.next, %n.vec
+  br i1 %1, label %while.end, label %vector.body
 
 while.end:                                        ; preds = %vector.body, %entry
   ret void
@@ -101,10 +95,10 @@ while.end:                                        ; preds = %vector.body, %entry
 
 declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
 
-declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>)
 
 declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
 
 declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
 
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-fabs.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-fabs.ll
index 66216022d6473..5d05819b45e47 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-fabs.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-fabs.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs -tail-predication=enabled -o - %s | FileCheck %s
 
-define arm_aapcs_vfpcc void @fabs(float* noalias nocapture readonly %pSrcA, float* noalias nocapture %pDst, i32 %blockSize) {
+define arm_aapcs_vfpcc void @fabs(ptr noalias nocapture readonly %pSrcA, ptr noalias nocapture %pDst, i32 %blockSize) {
 ; CHECK-LABEL: fabs:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -30,17 +30,15 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr float, float* %pDst, i32 %index
-  %next.gep13 = getelementptr float, float* %pSrcA, i32 %index
+  %next.gep = getelementptr float, ptr %pDst, i32 %index
+  %next.gep13 = getelementptr float, ptr %pSrcA, i32 %index
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %blockSize)
-  %0 = bitcast float* %next.gep13 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %1 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load)
-  %2 = bitcast float* %next.gep to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %active.lane.mask)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %next.gep13, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %0 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %next.gep, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %3 = icmp eq i32 %index.next, %n.vec
-  br i1 %3, label %while.end, label %vector.body
+  %1 = icmp eq i32 %index.next, %n.vec
+  br i1 %1, label %while.end, label %vector.body
 
 while.end:                                        ; preds = %vector.body, %entry
   ret void
@@ -48,8 +46,8 @@ while.end:                                        ; preds = %vector.body, %entry
 
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
 
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
 
 declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
 
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-round.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-round.ll
index b01a0cc047c29..f4372203e0118 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-round.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-round.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs -tail-predication=enabled -o - %s | FileCheck %s
 
-define arm_aapcs_vfpcc void @round(float* noalias nocapture readonly %pSrcA, float* noalias nocapture %pDst, i32 %n) #0 {
+define arm_aapcs_vfpcc void @round(ptr noalias nocapture readonly %pSrcA, ptr noalias nocapture %pDst, i32 %n) #0 {
 ; CHECK-LABEL: round:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -30,23 +30,21 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr float, float* %pSrcA, i32 %index
-  %next.gep14 = getelementptr float, float* %pDst, i32 %index
+  %next.gep = getelementptr float, ptr %pSrcA, i32 %index
+  %next.gep14 = getelementptr float, ptr %pDst, i32 %index
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %0 = bitcast float* %next.gep to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %1 = call fast <4 x float> @llvm.round.v4f32(<4 x float> %wide.masked.load)
-  %2 = bitcast float* %next.gep14 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %active.lane.mask)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %next.gep, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %0 = call fast <4 x float> @llvm.round.v4f32(<4 x float> %wide.masked.load)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %next.gep14, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %3 = icmp eq i32 %index.next, %n.vec
-  br i1 %3, label %for.cond.cleanup, label %vector.body
+  %1 = icmp eq i32 %index.next, %n.vec
+  br i1 %1, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @rint(float* noalias nocapture readonly %pSrcA, float* noalias nocapture %pDst, i32 %n) #0 {
+define arm_aapcs_vfpcc void @rint(ptr noalias nocapture readonly %pSrcA, ptr noalias nocapture %pDst, i32 %n) #0 {
 ; CHECK-LABEL: rint:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -75,23 +73,21 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr float, float* %pSrcA, i32 %index
-  %next.gep14 = getelementptr float, float* %pDst, i32 %index
+  %next.gep = getelementptr float, ptr %pSrcA, i32 %index
+  %next.gep14 = getelementptr float, ptr %pDst, i32 %index
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %0 = bitcast float* %next.gep to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %1 = call fast <4 x float> @llvm.rint.v4f32(<4 x float> %wide.masked.load)
-  %2 = bitcast float* %next.gep14 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %active.lane.mask)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %next.gep, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %0 = call fast <4 x float> @llvm.rint.v4f32(<4 x float> %wide.masked.load)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %next.gep14, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %3 = icmp eq i32 %index.next, %n.vec
-  br i1 %3, label %for.cond.cleanup, label %vector.body
+  %1 = icmp eq i32 %index.next, %n.vec
+  br i1 %1, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @trunc(float* noalias nocapture readonly %pSrcA, float* noalias nocapture %pDst, i32 %n) #0 {
+define arm_aapcs_vfpcc void @trunc(ptr noalias nocapture readonly %pSrcA, ptr noalias nocapture %pDst, i32 %n) #0 {
 ; CHECK-LABEL: trunc:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -120,23 +116,21 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr float, float* %pSrcA, i32 %index
-  %next.gep14 = getelementptr float, float* %pDst, i32 %index
+  %next.gep = getelementptr float, ptr %pSrcA, i32 %index
+  %next.gep14 = getelementptr float, ptr %pDst, i32 %index
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %0 = bitcast float* %next.gep to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %1 = call fast <4 x float> @llvm.trunc.v4f32(<4 x float> %wide.masked.load)
-  %2 = bitcast float* %next.gep14 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %active.lane.mask)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %next.gep, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %0 = call fast <4 x float> @llvm.trunc.v4f32(<4 x float> %wide.masked.load)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %next.gep14, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %3 = icmp eq i32 %index.next, %n.vec
-  br i1 %3, label %for.cond.cleanup, label %vector.body
+  %1 = icmp eq i32 %index.next, %n.vec
+  br i1 %1, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @ceil(float* noalias nocapture readonly %pSrcA, float* noalias nocapture %pDst, i32 %n) #0 {
+define arm_aapcs_vfpcc void @ceil(ptr noalias nocapture readonly %pSrcA, ptr noalias nocapture %pDst, i32 %n) #0 {
 ; CHECK-LABEL: ceil:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -165,23 +159,21 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr float, float* %pSrcA, i32 %index
-  %next.gep14 = getelementptr float, float* %pDst, i32 %index
+  %next.gep = getelementptr float, ptr %pSrcA, i32 %index
+  %next.gep14 = getelementptr float, ptr %pDst, i32 %index
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %0 = bitcast float* %next.gep to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %1 = call fast <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.masked.load)
-  %2 = bitcast float* %next.gep14 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %active.lane.mask)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %next.gep, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %0 = call fast <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.masked.load)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %next.gep14, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %3 = icmp eq i32 %index.next, %n.vec
-  br i1 %3, label %for.cond.cleanup, label %vector.body
+  %1 = icmp eq i32 %index.next, %n.vec
+  br i1 %1, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @floor(float* noalias nocapture readonly %pSrcA, float* noalias nocapture %pDst, i32 %n) #0 {
+define arm_aapcs_vfpcc void @floor(ptr noalias nocapture readonly %pSrcA, ptr noalias nocapture %pDst, i32 %n) #0 {
 ; CHECK-LABEL: floor:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -210,24 +202,22 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr float, float* %pSrcA, i32 %index
-  %next.gep14 = getelementptr float, float* %pDst, i32 %index
+  %next.gep = getelementptr float, ptr %pSrcA, i32 %index
+  %next.gep14 = getelementptr float, ptr %pDst, i32 %index
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %0 = bitcast float* %next.gep to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %1 = call fast <4 x float> @llvm.floor.v4f32(<4 x float> %wide.masked.load)
-  %2 = bitcast float* %next.gep14 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %active.lane.mask)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %next.gep, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %0 = call fast <4 x float> @llvm.floor.v4f32(<4 x float> %wide.masked.load)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %next.gep14, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %3 = icmp eq i32 %index.next, %n.vec
-  br i1 %3, label %for.cond.cleanup, label %vector.body
+  %1 = icmp eq i32 %index.next, %n.vec
+  br i1 %1, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
 ; nearbyint shouldn't be tail predicated because it's lowered into multiple instructions
-define arm_aapcs_vfpcc void @nearbyint(float* noalias nocapture readonly %pSrcA, float* noalias nocapture %pDst, i32 %n) #0 {
+define arm_aapcs_vfpcc void @nearbyint(ptr noalias nocapture readonly %pSrcA, ptr noalias nocapture %pDst, i32 %n) #0 {
 ; CHECK-LABEL: nearbyint:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -259,17 +249,15 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr float, float* %pSrcA, i32 %index
-  %next.gep14 = getelementptr float, float* %pDst, i32 %index
+  %next.gep = getelementptr float, ptr %pSrcA, i32 %index
+  %next.gep14 = getelementptr float, ptr %pDst, i32 %index
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %0 = bitcast float* %next.gep to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
-  %1 = call fast <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.masked.load)
-  %2 = bitcast float* %next.gep14 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %active.lane.mask)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %next.gep, i32 4, <4 x i1> %active.lane.mask, <4 x float> undef)
+  %0 = call fast <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.masked.load)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %next.gep14, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %3 = icmp eq i32 %index.next, %n.vec
-  br i1 %3, label %for.cond.cleanup, label %vector.body
+  %1 = icmp eq i32 %index.next, %n.vec
+  br i1 %1, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -277,7 +265,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32) #1
 
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>) #2
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>) #2
 
 declare <4 x float> @llvm.trunc.v4f32(<4 x float>) #3
 
@@ -291,4 +279,4 @@ declare <4 x float> @llvm.floor.v4f32(<4 x float>) #3
 
 declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) #1
 
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>) #4
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>) #4

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-sub-sat.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-sub-sat.ll
index 0e51661e8f58d..2764f69f1d5a8 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-sub-sat.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-intrinsic-sub-sat.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs -tail-predication=enabled -o - %s | FileCheck %s
 
-define arm_aapcs_vfpcc void @usub_sat(i16* noalias nocapture readonly %pSrcA, i16* noalias nocapture readonly %pSrcB, i16* noalias nocapture %pDst, i32 %blockSize) {
+define arm_aapcs_vfpcc void @usub_sat(ptr noalias nocapture readonly %pSrcA, ptr noalias nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %blockSize) {
 ; CHECK-LABEL: usub_sat:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -31,26 +31,23 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i16, i16* %pSrcA, i32 %index
-  %next.gep20 = getelementptr i16, i16* %pDst, i32 %index
-  %next.gep21 = getelementptr i16, i16* %pSrcB, i32 %index
+  %next.gep = getelementptr i16, ptr %pSrcA, i32 %index
+  %next.gep20 = getelementptr i16, ptr %pDst, i32 %index
+  %next.gep21 = getelementptr i16, ptr %pSrcB, i32 %index
   %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %blockSize)
-  %0 = bitcast i16* %next.gep to <8 x i16>*
-  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
-  %1 = bitcast i16* %next.gep21 to <8 x i16>*
-  %wide.masked.load24 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
-  %2 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %wide.masked.load, <8 x i16> %wide.masked.load24)
-  %3 = bitcast i16* %next.gep20 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %2, <8 x i16>* %3, i32 2, <8 x i1> %active.lane.mask)
+  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %next.gep, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %wide.masked.load24 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %next.gep21, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %0 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %wide.masked.load, <8 x i16> %wide.masked.load24)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %next.gep20, i32 2, <8 x i1> %active.lane.mask)
   %index.next = add i32 %index, 8
-  %4 = icmp eq i32 %index.next, %n.vec
-  br i1 %4, label %while.end, label %vector.body
+  %1 = icmp eq i32 %index.next, %n.vec
+  br i1 %1, label %while.end, label %vector.body
 
 while.end:                                        ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @ssub_sat(i16* noalias nocapture readonly %pSrcA, i16* noalias nocapture readonly %pSrcB, i16* noalias nocapture %pDst, i32 %blockSize) {
+define arm_aapcs_vfpcc void @ssub_sat(ptr noalias nocapture readonly %pSrcA, ptr noalias nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %blockSize) {
 ; CHECK-LABEL: ssub_sat:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -80,20 +77,17 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i16, i16* %pSrcA, i32 %index
-  %next.gep20 = getelementptr i16, i16* %pDst, i32 %index
-  %next.gep21 = getelementptr i16, i16* %pSrcB, i32 %index
+  %next.gep = getelementptr i16, ptr %pSrcA, i32 %index
+  %next.gep20 = getelementptr i16, ptr %pDst, i32 %index
+  %next.gep21 = getelementptr i16, ptr %pSrcB, i32 %index
   %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %blockSize)
-  %0 = bitcast i16* %next.gep to <8 x i16>*
-  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
-  %1 = bitcast i16* %next.gep21 to <8 x i16>*
-  %wide.masked.load24 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
-  %2 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %wide.masked.load, <8 x i16> %wide.masked.load24)
-  %3 = bitcast i16* %next.gep20 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %2, <8 x i16>* %3, i32 2, <8 x i1> %active.lane.mask)
+  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %next.gep, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %wide.masked.load24 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %next.gep21, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
+  %0 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %wide.masked.load, <8 x i16> %wide.masked.load24)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %next.gep20, i32 2, <8 x i1> %active.lane.mask)
   %index.next = add i32 %index, 8
-  %4 = icmp eq i32 %index.next, %n.vec
-  br i1 %4, label %while.end, label %vector.body
+  %1 = icmp eq i32 %index.next, %n.vec
+  br i1 %1, label %while.end, label %vector.body
 
 while.end:                                        ; preds = %vector.body, %entry
   ret void
@@ -101,10 +95,10 @@ while.end:                                        ; preds = %vector.body, %entry
 
 declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
 
-declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>)
 
 declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
 
 declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
 
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-narrow.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-narrow.ll
index 956bf9207e188..02bdc26166073 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-narrow.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-narrow.ll
@@ -3,7 +3,7 @@
 ; TODO: We should be able to generate a vctp for the loads.
 ; CHECK-LABEL: trunc_v4i32_v4i16
 ; CHECK-NOT: vcpt
-define void @trunc_v4i32_v4i16(i32* readonly %a, i32* readonly %b, i16* %c, i32 %N) {
+define void @trunc_v4i32_v4i16(ptr readonly %a, ptr readonly %b, ptr %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -27,18 +27,15 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = icmp ule <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %mul = mul nsw <4 x i32> %wide.masked.load2, %wide.masked.load
   %trunc = trunc <4 x i32> %mul to <4 x i16>
-  %tmp6 = getelementptr inbounds i16, i16* %c, i32 %index
-  %tmp7 = bitcast i16* %tmp6 to <4 x i16>*
-  tail call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %trunc, <4 x i16>* %tmp7, i32 4, <4 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i16, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i16.p0(<4 x i16> %trunc, ptr %tmp6, i32 4, <4 x i1> %tmp1)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -48,7 +45,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-declare void @llvm.masked.store.v4i16.p0v4i16(<4 x i16>, <4 x i16>*, i32 immarg, <4 x i1>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v4i16.p0(<4 x i16>, ptr, i32 immarg, <4 x i1>)
 declare i32 @llvm.start.loop.iterations.i32(i32)
 declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-pattern-fail.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-pattern-fail.ll
index 9c4a7ed350843..b3b25aa4e7873 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-pattern-fail.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-pattern-fail.ll
@@ -5,7 +5,7 @@
 ; CHECK-NOT: call i32 @llvm.arm.vctp
 
 ; trip.count.minus.1 has been inserted into element 1, not 0.
-define dso_local arm_aapcs_vfpcc void @wrong_ph_insert_0(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @wrong_ph_insert_0(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -29,17 +29,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = icmp ule <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %tmp5 = mul nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp5, <4 x i32>* %tmp7, i32 4, <4 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp5, ptr %tmp6, i32 4, <4 x i1> %tmp1)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -50,7 +47,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 ; The insert isn't using an undef for operand 0.
-define dso_local arm_aapcs_vfpcc void @wrong_ph_insert_def(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @wrong_ph_insert_def(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -74,17 +71,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = icmp ule <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %tmp5 = mul nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp5, <4 x i32>* %tmp7, i32 4, <4 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp5, ptr %tmp6, i32 4, <4 x i1> %tmp1)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -95,7 +89,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 ; The shuffle uses a defined value for operand 1.
-define dso_local arm_aapcs_vfpcc void @wrong_ph_shuffle_1(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @wrong_ph_shuffle_1(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -119,17 +113,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = icmp ule <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %tmp5 = mul nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp5, <4 x i32>* %tmp7, i32 4, <4 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp5, ptr %tmp6, i32 4, <4 x i1> %tmp1)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -140,7 +131,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 ; The shuffle uses a non zero value for operand 2.
-define dso_local arm_aapcs_vfpcc void @wrong_ph_shuffle_2(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @wrong_ph_shuffle_2(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -164,17 +155,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = icmp ule <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %tmp5 = mul nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp5, <4 x i32>* %tmp7, i32 4, <4 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp5, ptr %tmp6, i32 4, <4 x i1> %tmp1)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -185,7 +173,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 ; %N - 2
-define dso_local arm_aapcs_vfpcc void @trip_count_minus_2(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @trip_count_minus_2(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -209,17 +197,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = icmp ule <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %tmp5 = mul nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp5, <4 x i32>* %tmp7, i32 4, <4 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp5, ptr %tmp6, i32 4, <4 x i1> %tmp1)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -230,7 +215,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 ; index has been inserted at element 1, not 0.
-define dso_local arm_aapcs_vfpcc void @wrong_loop_insert(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @wrong_loop_insert(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -254,17 +239,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 1
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = icmp ule <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %tmp5 = mul nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp5, <4 x i32>* %tmp7, i32 4, <4 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp5, ptr %tmp6, i32 4, <4 x i1> %tmp1)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -274,7 +256,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define dso_local arm_aapcs_vfpcc void @wrong_loop_invalid_index_splat(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @wrong_loop_invalid_index_splat(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -299,17 +281,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %incorrect, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = icmp ule <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %tmp5 = mul nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp5, <4 x i32>* %tmp7, i32 4, <4 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp5, ptr %tmp6, i32 4, <4 x i1> %tmp1)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -320,7 +299,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 ; Now using ult, not ule for the vector icmp
-define dso_local arm_aapcs_vfpcc void @wrong_pred_opcode(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @wrong_pred_opcode(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -344,17 +323,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = icmp ult <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %tmp5 = mul nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp5, <4 x i32>* %tmp7, i32 4, <4 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp5, ptr %tmp6, i32 4, <4 x i1> %tmp1)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -365,7 +341,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 ; The add in the body uses 1, 2, 3, 4
-define void @wrong_body_broadcast_splat(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+define void @wrong_body_broadcast_splat(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -389,17 +365,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 1, i32 2, i32 3, i32 4>
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = icmp ule <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %tmp5 = mul nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp5, <4 x i32>* %tmp7, i32 4, <4 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp5, ptr %tmp6, i32 4, <4 x i1> %tmp1)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -410,7 +383,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 ; Using a variable for the loop body broadcast.
-define void @wrong_body_broadcast_splat_2(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N, <4 x i32> %offsets) {
+define void @wrong_body_broadcast_splat_2(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N, <4 x i32> %offsets) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -434,17 +407,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, %offsets
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = icmp ule <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %tmp5 = mul nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp5, <4 x i32>* %tmp7, i32 4, <4 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp5, ptr %tmp6, i32 4, <4 x i1> %tmp1)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -455,7 +425,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 }
 
 ; adding 5, instead of 4, to index.
-define void @wrong_index_add(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+define void @wrong_index_add(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -479,17 +449,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = icmp ule <4 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load12 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %tmp5 = mul nsw <4 x i32> %wide.masked.load12, %wide.masked.load
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp5, <4 x i32>* %tmp7, i32 4, <4 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %tmp5, ptr %tmp6, i32 4, <4 x i1> %tmp1)
   %index.next = add i32 %index, 5
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -499,8 +466,8 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) #1
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>) #2
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>) #1
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>) #2
 declare i32 @llvm.start.loop.iterations.i32(i32) #3
 declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
 

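Each function in tail-pred-pattern-fail.ll above deliberately perturbs one element of the induction/predicate idiom that tail predication matches, so no vctp may be generated. For contrast, the accepted shape, which the conversion leaves structurally unchanged apart from the pointer types, looks roughly like this (%pred and %trip.count.splat are placeholder names):

  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
  %pred = icmp ule <4 x i32> %induction, %trip.count.splat
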
diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-widen.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-widen.ll
index c9b2905755ed9..a8ad3605375a2 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-widen.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-widen.ll
@@ -2,7 +2,7 @@
 
 ; CHECK-LABEL: expand_v8i16_v8i32
 ; CHECK-NOT: call i32 @llvm.arm.mve.vctp
-define void @expand_v8i16_v8i32(i16* noalias nocapture readonly %a, i16* noalias nocapture readonly %b, i32* noalias nocapture %c, i32 %N) {
+define void @expand_v8i16_v8i32(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 7
@@ -20,19 +20,16 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %tmp14 = phi i32 [ %start, %vector.ph ], [ %tmp15, %vector.body ]
-  %tmp = getelementptr inbounds i16, i16* %a, i32 %index
+  %tmp = getelementptr inbounds i16, ptr %a, i32 %index
   %tmp1 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
-  %tmp2 = bitcast i16* %tmp to <8 x i16>*
-  %wide.masked.load = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp2, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
-  %tmp3 = getelementptr inbounds i16, i16* %b, i32 %index
-  %tmp4 = bitcast i16* %tmp3 to <8 x i16>*
-  %wide.masked.load2 = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp4, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
+  %wide.masked.load = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %tmp, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
+  %tmp3 = getelementptr inbounds i16, ptr %b, i32 %index
+  %wide.masked.load2 = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %tmp3, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
   %expand.1 = zext <8 x i16> %wide.masked.load to <8 x i32>
   %expand.2 = zext <8 x i16> %wide.masked.load2 to <8 x i32>
   %mul = mul nsw <8 x i32> %expand.2, %expand.1
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %index
-  %tmp7 = bitcast i32* %tmp6 to <8 x i32>*
-  tail call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32> %mul, <8 x i32>* %tmp7, i32 4, <8 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v8i32.p0(<8 x i32> %mul, ptr %tmp6, i32 4, <8 x i1> %tmp1)
   %index.next = add i32 %index, 8
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -46,11 +43,11 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 ; CHECK: [[ELEMS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[ELEMS_REM:%[^ ]+]], %vector.body ]
 ; CHECK: [[VCTP:%[^ ]+]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[ELEMS]])
 ; CHECK: [[ELEMS_REM]] = sub i32 [[ELEMS]], 8
-; CHECK: tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* {{.*}}, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef)
+; CHECK: tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr {{.*}}, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef)
 ; CHECK: %store.pred = icmp ule <4 x i32> %induction.store
-; CHECK: tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> {{.*}}, <4 x i32>* {{.*}}, i32 4, <4 x i1> %store.pred)
-; CHECK: tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> {{.*}}, <4 x i32>* {{.*}}, i32 4, <4 x i1> %store.pred)
-define void @expand_v8i16_v4i32(i16* readonly %a, i16* readonly %b, i32* %c, i32* %d, i32 %N) {
+; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> %store.pred)
+; CHECK: tail call void @llvm.masked.store.v4i32.p0(<4 x i32> {{.*}}, ptr {{.*}}, i32 4, <4 x i1> %store.pred)
+define void @expand_v8i16_v4i32(ptr readonly %a, ptr readonly %b, ptr %c, ptr %d, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 7
@@ -72,13 +69,11 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %store.idx = phi i32 [ 0, %vector.ph ], [ %store.idx.next, %vector.body ]
   %tmp14 = phi i32 [ %start, %vector.ph ], [ %tmp15, %vector.body ]
-  %tmp = getelementptr inbounds i16, i16* %a, i32 %index
+  %tmp = getelementptr inbounds i16, ptr %a, i32 %index
   %tmp1 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
-  %tmp2 = bitcast i16* %tmp to <8 x i16>*
-  %wide.masked.load = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp2, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
-  %tmp3 = getelementptr inbounds i16, i16* %b, i32 %index
-  %tmp4 = bitcast i16* %tmp3 to <8 x i16>*
-  %wide.masked.load2 = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp4, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
+  %wide.masked.load = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %tmp, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
+  %tmp3 = getelementptr inbounds i16, ptr %b, i32 %index
+  %wide.masked.load2 = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %tmp3, i32 4, <8 x i1> %tmp1, <8 x i16> undef)
   %extract.2.low = shufflevector <8 x i16> %wide.masked.load2, <8 x i16> undef, < 4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %extract.2.high = shufflevector <8 x i16> %wide.masked.load2, <8 x i16> undef, < 4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %expand.1 = zext <4 x i16> %extract.2.low to <4 x i32>
@@ -89,12 +84,10 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splat.store = shufflevector <4 x i32> %broadcast.splatinsert.store, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction.store = add <4 x i32> %broadcast.splat.store, <i32 0, i32 1, i32 2, i32 3>
   %store.pred = icmp ule <4 x i32> %induction.store, %broadcast.splat11.store
-  %tmp6 = getelementptr inbounds i32, i32* %c, i32 %store.idx
-  %tmp7 = bitcast i32* %tmp6 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %mul, <4 x i32>* %tmp7, i32 4, <4 x i1> %store.pred)
-  %gep = getelementptr inbounds i32, i32* %d, i32 %store.idx
-  %cast.gep = bitcast i32* %gep to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %sub, <4 x i32>* %cast.gep, i32 4, <4 x i1> %store.pred)
+  %tmp6 = getelementptr inbounds i32, ptr %c, i32 %store.idx
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %mul, ptr %tmp6, i32 4, <4 x i1> %store.pred)
+  %gep = getelementptr inbounds i32, ptr %d, i32 %store.idx
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %sub, ptr %gep, i32 4, <4 x i1> %store.pred)
   %store.idx.next = add i32 %store.idx, 4
   %index.next = add i32 %index, 8
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
@@ -107,7 +100,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 
 ; CHECK-LABEL: expand_v4i32_v4i64
 ; CHECK-NOT: call i32 @llvm.arm.mve.vctp
-define void @expand_v4i32_v4i64(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i64* noalias nocapture %c, i32 %N) {
+define void @expand_v4i32_v4i64(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 3
@@ -125,19 +118,16 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %tmp14 = phi i32 [ %start, %vector.ph ], [ %tmp15, %vector.body ]
-  %tmp = getelementptr inbounds i32, i32* %a, i32 %index
+  %tmp = getelementptr inbounds i32, ptr %a, i32 %index
   %tmp1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %tmp2 = bitcast i32* %tmp to <4 x i32>*
-  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp2, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
-  %tmp3 = getelementptr inbounds i32, i32* %b, i32 %index
-  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
-  %wide.masked.load2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp4, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %wide.masked.load = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
+  %tmp3 = getelementptr inbounds i32, ptr %b, i32 %index
+  %wide.masked.load2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %tmp3, i32 4, <4 x i1> %tmp1, <4 x i32> undef)
   %expand.1 = zext <4 x i32> %wide.masked.load to <4 x i64>
   %expand.2 = zext <4 x i32> %wide.masked.load2 to <4 x i64>
   %mul = mul nsw <4 x i64> %expand.2, %expand.1
-  %tmp6 = getelementptr inbounds i64, i64* %c, i32 %index
-  %tmp7 = bitcast i64* %tmp6 to <4 x i64>*
-  tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %mul, <4 x i64>* %tmp7, i32 4, <4 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i64, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v4i64.p0(<4 x i64> %mul, ptr %tmp6, i32 4, <4 x i1> %tmp1)
   %index.next = add i32 %index, 4
   %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp14, i32 1)
   %tmp16 = icmp ne i32 %tmp15, 0
@@ -147,11 +137,11 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
-declare void @llvm.masked.store.v8i32.p0v8i32(<8 x i32>, <8 x i32>*, i32 immarg, <8 x i1>)
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
-declare void @llvm.masked.store.v4i64.p0v4i64(<4 x i64>, <4 x i64>*, i32 immarg, <4 x i1>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>)
+declare void @llvm.masked.store.v8i32.p0(<8 x i32>, ptr, i32 immarg, <8 x i1>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v4i64.p0(<4 x i64>, ptr, i32 immarg, <4 x i1>)
 declare i32 @llvm.start.loop.iterations.i32(i32)
 declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)

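Note that FileCheck lines spelling out the full intrinsic name, as in tail-pred-widen.ll above, have to be updated together with the IR, since the mangled suffix itself changes. Roughly:

  ; before: CHECK: ... @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* {{.*}}, ...
  ; after:  CHECK: ... @llvm.masked.load.v8i16.p0(ptr {{.*}}, ...
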
diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
index df0afcfd14473..07c06e10979cd 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
@@ -14,7 +14,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -tail-predication=force-enabled-no-reductions %s -o - | \
 ; RUN:   FileCheck %s --check-prefix=NOREDUCTIONS
 
-define dso_local void @varying_outer_2d_reduction(i16* nocapture readonly %Input, i16* nocapture %Output, i16 signext %Size, i16 signext %N, i16 signext %Scale) local_unnamed_addr {
+define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input, ptr nocapture %Output, i16 signext %Size, i16 signext %N, i16 signext %Scale) local_unnamed_addr {
 ; ENABLED-LABEL: varying_outer_2d_reduction:
 ; ENABLED:       @ %bb.0: @ %entry
 ; ENABLED-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, lr}
@@ -169,7 +169,7 @@ for.body.lr.ph:                                   ; preds = %entry
 
 for.body:                                         ; preds = %for.end, %for.body.lr.ph
   %lsr.iv51 = phi i32 [ %lsr.iv.next, %for.end ], [ %i, %for.body.lr.ph ]
-  %lsr.iv46 = phi i16* [ %scevgep47, %for.end ], [ %Input, %for.body.lr.ph ]
+  %lsr.iv46 = phi ptr [ %scevgep47, %for.end ], [ %Input, %for.body.lr.ph ]
   %i.037 = phi i32 [ 0, %for.body.lr.ph ], [ %inc16, %for.end ]
   %i1 = mul nsw i32 %i.037, -1
   %i2 = add i32 %i, %i1
@@ -187,17 +187,15 @@ vector.ph:                                        ; preds = %for.body
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
-  %lsr.iv48 = phi i16* [ %scevgep49, %vector.body ], [ %lsr.iv46, %vector.ph ]
-  %lsr.iv = phi i16* [ %scevgep, %vector.body ], [ %Input, %vector.ph ]
+  %lsr.iv48 = phi ptr [ %scevgep49, %vector.body ], [ %lsr.iv46, %vector.ph ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %Input, %vector.ph ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %i16, %vector.body ]
   %i9 = phi i32 [ %start, %vector.ph ], [ %i17, %vector.body ]
-  %lsr.iv4850 = bitcast i16* %lsr.iv48 to <4 x i16>*
-  %lsr.iv45 = bitcast i16* %lsr.iv to <4 x i16>*
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %i8)
-  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv45, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %lsr.iv, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
   %i10 = sext <4 x i16> %wide.masked.load to <4 x i32>
-  %wide.masked.load42 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv4850, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
+  %wide.masked.load42 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %lsr.iv48, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
   %i11 = sext <4 x i16> %wide.masked.load42 to <4 x i32>
   %i12 = mul nsw <4 x i32> %i11, %i10
   %i13 = insertelement <4 x i32> undef, i32 %conv1032, i32 0
@@ -205,8 +203,8 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %i15 = ashr <4 x i32> %i12, %i14
   %i16 = add <4 x i32> %i15, %vec.phi
   %index.next = add i32 %index, 4
-  %scevgep = getelementptr i16, i16* %lsr.iv, i32 4
-  %scevgep49 = getelementptr i16, i16* %lsr.iv48, i32 4
+  %scevgep = getelementptr i16, ptr %lsr.iv, i32 4
+  %scevgep49 = getelementptr i16, ptr %lsr.iv48, i32 4
   %i17 = call i32 @llvm.loop.decrement.reg.i32(i32 %i9, i32 1)
   %i18 = icmp ne i32 %i17, 0
   br i1 %i18, label %vector.body, label %middle.block
@@ -220,10 +218,10 @@ for.end:                                          ; preds = %middle.block, %for.
   %Sum.0.lcssa = phi i32 [ 0, %for.body ], [ %i20, %middle.block ]
   %i21 = lshr i32 %Sum.0.lcssa, 16
   %conv13 = trunc i32 %i21 to i16
-  %arrayidx14 = getelementptr inbounds i16, i16* %Output, i32 %i.037
-  store i16 %conv13, i16* %arrayidx14, align 2
+  %arrayidx14 = getelementptr inbounds i16, ptr %Output, i32 %i.037
+  store i16 %conv13, ptr %arrayidx14, align 2
   %inc16 = add nuw nsw i32 %i.037, 1
-  %scevgep47 = getelementptr i16, i16* %lsr.iv46, i32 1
+  %scevgep47 = getelementptr i16, ptr %lsr.iv46, i32 1
   %lsr.iv.next = add i32 %lsr.iv51, -1
   %exitcond39 = icmp eq i32 %inc16, %conv
   br i1 %exitcond39, label %for.end17, label %for.body
@@ -233,7 +231,7 @@ for.end17:                                        ; preds = %for.end, %entry
 }
 
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
-declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32 immarg, <4 x i1>, <4 x i16>)
 declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
 declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
 declare i32 @llvm.start.loop.iterations.i32(i32)

diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll
index ec6a7554b3e66..ec542df07e684 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll
@@ -11,14 +11,14 @@
 ; CHECK: [[ELTS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[SUB:%[^ ]+]], %vector.body ]
 ; CHECK: [[VCTP:%[^ ]+]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[ELTS]])
 ; CHECK: [[SUB]] = sub i32 [[ELTS]], 4
-; CHECK: call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]]
-; CHECK: call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]],
+; CHECK: call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]]
+; CHECK: call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{.*}}, i32 4, <4 x i1> [[VCTP]],
 
 ; CHECK: middle.block:
 ; CHECK: [[VPSEL:%[^ ]+]] = select <4 x i1> [[VCTP]],
 ; CHECK: call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VPSEL]])
 
-define i32 @vec_mul_reduce_add(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32 %N) {
+define i32 @vec_mul_reduce_add(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %0 = add i32 %N, 3
@@ -35,20 +35,18 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %lsr.iv2 = phi i32* [ %scevgep3, %vector.body ], [ %a, %vector.ph ]
-  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %b, %vector.ph ]
+  %lsr.iv2 = phi ptr [ %scevgep3, %vector.body ], [ %a, %vector.ph ]
+  %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %b, %vector.ph ]
   %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %9, %vector.body ]
   %6 = phi i32 [ %start, %vector.ph ], [ %10, %vector.body ]
-  %lsr.iv24 = bitcast i32* %lsr.iv2 to <4 x i32>*
-  %lsr.iv1 = bitcast i32* %lsr.iv to <4 x i32>*
   %7 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv24, i32 4, <4 x i1> %7, <4 x i32> undef)
-  %wide.masked.load13 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1, i32 4, <4 x i1> %7, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv2, i32 4, <4 x i1> %7, <4 x i32> undef)
+  %wide.masked.load13 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv, i32 4, <4 x i1> %7, <4 x i32> undef)
   %8 = mul nsw <4 x i32> %wide.masked.load13, %wide.masked.load
   %9 = add nsw <4 x i32> %8, %vec.phi
   %index.next = add i32 %index, 4
-  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
-  %scevgep3 = getelementptr i32, i32* %lsr.iv2, i32 4
+  %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
+  %scevgep3 = getelementptr i32, ptr %lsr.iv2, i32 4
   %10 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1)
   %11 = icmp ne i32 %10, 0
   br i1 %11, label %vector.body, label %middle.block
@@ -63,7 +61,7 @@ for.cond.cleanup:                                 ; preds = %middle.block, %entr
   ret i32 %res.0.lcssa
 }
 
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
 declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
 declare i32 @llvm.start.loop.iterations.i32(i32)
 declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)

diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-unroll.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-unroll.ll
index 4f8add4860958..24d76694dcff3 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-unroll.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-unroll.ll
@@ -3,7 +3,7 @@
 ; TODO: The unrolled pattern is preventing the transform
 ; CHECK-LABEL: mul_v16i8_unroll
 ; CHECK-NOT: call i32 @llvm.arm.vcpt
-define void @mul_v16i8_unroll(i8* noalias nocapture readonly %a, i8* noalias nocapture readonly %b, i8* noalias nocapture %c, i32 %N) {
+define void @mul_v16i8_unroll(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture %c, i32 %N) {
 entry:
   %cmp8 = icmp eq i32 %N, 0
   %tmp8 = add i32 %N, 15
@@ -33,33 +33,27 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <16 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <16 x i32> %broadcast.splatinsert, <16 x i32> undef, <16 x i32> zeroinitializer
   %induction = add <16 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  %tmp = getelementptr inbounds i8, i8* %a, i32 %index
+  %tmp = getelementptr inbounds i8, ptr %a, i32 %index
   %tmp1 = icmp ule <16 x i32> %induction, %broadcast.splat11
-  %tmp2 = bitcast i8* %tmp to <16 x i8>*
-  %wide.masked.load = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp2, i32 4, <16 x i1> %tmp1, <16 x i8> undef)
-  %tmp3 = getelementptr inbounds i8, i8* %b, i32 %index
-  %tmp4 = bitcast i8* %tmp3 to <16 x i8>*
-  %wide.masked.load2 = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp4, i32 4, <16 x i1> %tmp1, <16 x i8> undef)
+  %wide.masked.load = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %tmp, i32 4, <16 x i1> %tmp1, <16 x i8> undef)
+  %tmp3 = getelementptr inbounds i8, ptr %b, i32 %index
+  %wide.masked.load2 = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %tmp3, i32 4, <16 x i1> %tmp1, <16 x i8> undef)
   %mul = mul nsw <16 x i8> %wide.masked.load2, %wide.masked.load
-  %tmp6 = getelementptr inbounds i8, i8* %c, i32 %index
-  %tmp7 = bitcast i8* %tmp6 to <16 x i8>*
-  tail call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %mul, <16 x i8>* %tmp7, i32 4, <16 x i1> %tmp1)
+  %tmp6 = getelementptr inbounds i8, ptr %c, i32 %index
+  tail call void @llvm.masked.store.v16i8.p0(<16 x i8> %mul, ptr %tmp6, i32 4, <16 x i1> %tmp1)
   %index.next = add nuw nsw i32 %index, 16
   %niter.nsub = sub i32 %niter, 1
   %broadcast.splatinsert.1 = insertelement <16 x i32> undef, i32 %index.next, i32 0
   %broadcast.splat.1 = shufflevector <16 x i32> %broadcast.splatinsert.1, <16 x i32> undef, <16 x i32> zeroinitializer
   %induction.1 = add <16 x i32> %broadcast.splat.1, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  %tmp.1 = getelementptr inbounds i8, i8* %a, i32 %index.next
+  %tmp.1 = getelementptr inbounds i8, ptr %a, i32 %index.next
   %tmp1.1 = icmp ule <16 x i32> %induction.1, %broadcast.splat11
-  %tmp2.1 = bitcast i8* %tmp.1 to <16 x i8>*
-  %wide.masked.load.1 = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp2.1, i32 4, <16 x i1> %tmp1.1, <16 x i8> undef)
-  %tmp3.1 = getelementptr inbounds i8, i8* %b, i32 %index.next
-  %tmp4.1 = bitcast i8* %tmp3.1 to <16 x i8>*
-  %wide.masked.load2.1 = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp4.1, i32 4, <16 x i1> %tmp1.1, <16 x i8> undef)
+  %wide.masked.load.1 = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %tmp.1, i32 4, <16 x i1> %tmp1.1, <16 x i8> undef)
+  %tmp3.1 = getelementptr inbounds i8, ptr %b, i32 %index.next
+  %wide.masked.load2.1 = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %tmp3.1, i32 4, <16 x i1> %tmp1.1, <16 x i8> undef)
   %mul.1 = mul nsw <16 x i8> %wide.masked.load2.1, %wide.masked.load.1
-  %tmp6.1 = getelementptr inbounds i8, i8* %c, i32 %index.next
-  %tmp7.1 = bitcast i8* %tmp6.1 to <16 x i8>*
-  tail call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %mul.1, <16 x i8>* %tmp7.1, i32 4, <16 x i1> %tmp1.1)
+  %tmp6.1 = getelementptr inbounds i8, ptr %c, i32 %index.next
+  tail call void @llvm.masked.store.v16i8.p0(<16 x i8> %mul.1, ptr %tmp6.1, i32 4, <16 x i1> %tmp1.1)
   %index.next.1 = add i32 %index.next, 16
   %niter.nsub.1 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %niter.nsub, i32 1)
   %niter.ncmp.1 = icmp ne i32 %niter.nsub.1, 0
@@ -85,17 +79,14 @@ vector.body.epil:                                 ; preds = %vector.body.epil.pr
   %broadcast.splatinsert.epil = insertelement <16 x i32> undef, i32 %index.epil, i32 0
   %broadcast.splat.epil = shufflevector <16 x i32> %broadcast.splatinsert.epil, <16 x i32> undef, <16 x i32> zeroinitializer
   %induction.epil = add <16 x i32> %broadcast.splat.epil, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  %tmp.epil = getelementptr inbounds i8, i8* %a, i32 %index.epil
+  %tmp.epil = getelementptr inbounds i8, ptr %a, i32 %index.epil
   %tmp1.epil = icmp ule <16 x i32> %induction.epil, %broadcast.splat11
-  %tmp2.epil = bitcast i8* %tmp.epil to <16 x i8>*
-  %wide.masked.load.epil = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp2.epil, i32 4, <16 x i1> %tmp1.epil, <16 x i8> undef)
-  %tmp3.epil = getelementptr inbounds i8, i8* %b, i32 %index.epil
-  %tmp4.epil = bitcast i8* %tmp3.epil to <16 x i8>*
-  %wide.masked.load2.epil = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp4.epil, i32 4, <16 x i1> %tmp1.epil, <16 x i8> undef)
+  %wide.masked.load.epil = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %tmp.epil, i32 4, <16 x i1> %tmp1.epil, <16 x i8> undef)
+  %tmp3.epil = getelementptr inbounds i8, ptr %b, i32 %index.epil
+  %wide.masked.load2.epil = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %tmp3.epil, i32 4, <16 x i1> %tmp1.epil, <16 x i8> undef)
   %mul.epil = mul nsw <16 x i8> %wide.masked.load2.epil, %wide.masked.load.epil
-  %tmp6.epil = getelementptr inbounds i8, i8* %c, i32 %index.epil
-  %tmp7.epil = bitcast i8* %tmp6.epil to <16 x i8>*
-  tail call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %mul.epil, <16 x i8>* %tmp7.epil, i32 4, <16 x i1> %tmp1.epil)
+  %tmp6.epil = getelementptr inbounds i8, ptr %c, i32 %index.epil
+  tail call void @llvm.masked.store.v16i8.p0(<16 x i8> %mul.epil, ptr %tmp6.epil, i32 4, <16 x i1> %tmp1.epil)
   %index.next.epil = add i32 %index.epil, 16
   %tmp15.epil = add nuw nsw i32 %tmp14.epil, -1
   %tmp16.epil = icmp ne i32 %tmp15.epil, 0
@@ -111,8 +102,8 @@ for.cond.cleanup:                                 ; preds = %for.cond.cleanup.lo
   ret void
 }
 
-declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>) #1
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32 immarg, <16 x i1>) #2
+declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32 immarg, <16 x i1>, <16 x i8>) #1
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32 immarg, <16 x i1>) #2
 declare i32 @llvm.start.loop.iterations.i32(i32) #3
 declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
 

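Plain (non-intrinsic) memory operations change the same way, as the while-loops.ll hunks below show: the intermediate bitcast disappears and the loaded value type alone carries the element information. A minimal sketch with a placeholder %p:

  ; typed pointers:
  ;   %1 = bitcast i32* %p to <4 x i32>*
  ;   %2 = load <4 x i32>, <4 x i32>* %1, align 4
  ; opaque pointers:
  %2 = load <4 x i32>, ptr %p, align 4
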
diff  --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll
index 845436ea301b8..fc58873f9857b 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve --verify-machineinstrs %s -o - | FileCheck %s
 
 ; Tail predicated so we use DLSTP
-define void @simple(i32* nocapture readonly %x, i32* nocapture readnone %y, i32* nocapture %z, i32 %m, i32 %n) {
+define void @simple(ptr nocapture readonly %x, ptr nocapture readnone %y, ptr nocapture %z, i32 %m, i32 %n) {
 ; CHECK-LABEL: simple:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -32,13 +32,12 @@ do.body:                                          ; preds = %entry, %do.body
   %n.addr.0 = phi i32 [ %sub, %do.body ], [ %n, %entry ]
   %count.0 = phi i32 [ %sub3, %do.body ], [ %div, %entry ]
   %s.0 = phi i32 [ %add2, %do.body ], [ %m, %entry ]
-  %x.addr.0 = phi i32* [ %add.ptr, %do.body ], [ %x, %entry ]
+  %x.addr.0 = phi ptr [ %add.ptr, %do.body ], [ %x, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %n.addr.0)
-  %1 = bitcast i32* %x.addr.0 to <4 x i32>*
-  %2 = load <4 x i32>, <4 x i32>* %1, align 4
-  %3 = tail call i32 @llvm.arm.mve.addv.predicated.v4i32.v4i1(<4 x i32> %2, i32 0, <4 x i1> %0)
-  %add2 = add nsw i32 %3, %s.0
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.0, i32 4
+  %1 = load <4 x i32>, ptr %x.addr.0, align 4
+  %2 = tail call i32 @llvm.arm.mve.addv.predicated.v4i32.v4i1(<4 x i32> %1, i32 0, <4 x i1> %0)
+  %add2 = add nsw i32 %2, %s.0
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.0, i32 4
   %sub = add i32 %n.addr.0, -4
   %sub3 = add nsw i32 %count.0, -1
   %cmp4 = icmp sgt i32 %count.0, 1
@@ -46,12 +45,12 @@ do.body:                                          ; preds = %entry, %do.body
 
 if.end:                                           ; preds = %do.body, %entry
   %s.1 = phi i32 [ %m, %entry ], [ %add2, %do.body ]
-  store i32 %s.1, i32* %z, align 4
+  store i32 %s.1, ptr %z, align 4
   ret void
 }
 
 ; Tail predicated so we use DLSTP
-define void @nested(i32* nocapture readonly %x, i32* nocapture readnone %y, i32* nocapture %z, i32 %m, i32 %n) {
+define void @nested(ptr nocapture readonly %x, ptr nocapture readnone %y, ptr nocapture %z, i32 %m, i32 %n) {
 ; CHECK-LABEL: nested:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
@@ -102,7 +101,7 @@ for.cond.cleanup:                                 ; preds = %if.end, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %if.end
-  %x.addr.023 = phi i32* [ %x.addr.2, %if.end ], [ %x, %entry ]
+  %x.addr.023 = phi ptr [ %x.addr.2, %if.end ], [ %x, %entry ]
   %a.022 = phi i32 [ %inc, %if.end ], [ 0, %entry ]
   %n.addr.021 = phi i32 [ %n.addr.2, %if.end ], [ %n, %entry ]
   %add = add i32 %n.addr.021, 3
@@ -112,35 +111,34 @@ for.body:                                         ; preds = %entry, %if.end
 
 do.body.preheader:                                ; preds = %for.body
   %0 = and i32 %add, -4
-  %scevgep = getelementptr i32, i32* %x.addr.023, i32 %0
+  %scevgep = getelementptr i32, ptr %x.addr.023, i32 %0
   br label %do.body
 
 do.body:                                          ; preds = %do.body.preheader, %do.body
   %n.addr.1 = phi i32 [ %sub, %do.body ], [ %n.addr.021, %do.body.preheader ]
   %count.0 = phi i32 [ %sub4, %do.body ], [ %div, %do.body.preheader ]
   %s.0 = phi i32 [ %add3, %do.body ], [ %m, %do.body.preheader ]
-  %x.addr.1 = phi i32* [ %add.ptr, %do.body ], [ %x.addr.023, %do.body.preheader ]
+  %x.addr.1 = phi ptr [ %add.ptr, %do.body ], [ %x.addr.023, %do.body.preheader ]
   %1 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %n.addr.1)
-  %2 = bitcast i32* %x.addr.1 to <4 x i32>*
-  %3 = load <4 x i32>, <4 x i32>* %2, align 4
-  %4 = tail call i32 @llvm.arm.mve.addv.predicated.v4i32.v4i1(<4 x i32> %3, i32 0, <4 x i1> %1)
-  %add3 = add nsw i32 %4, %s.0
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.1, i32 4
+  %2 = load <4 x i32>, ptr %x.addr.1, align 4
+  %3 = tail call i32 @llvm.arm.mve.addv.predicated.v4i32.v4i1(<4 x i32> %2, i32 0, <4 x i1> %1)
+  %add3 = add nsw i32 %3, %s.0
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.1, i32 4
   %sub = add i32 %n.addr.1, -4
   %sub4 = add nsw i32 %count.0, -1
   %cmp5 = icmp sgt i32 %count.0, 1
   br i1 %cmp5, label %do.body, label %if.end.loopexit
 
 if.end.loopexit:                                  ; preds = %do.body
-  %5 = sub i32 %n.addr.021, %0
+  %4 = sub i32 %n.addr.021, %0
   br label %if.end
 
 if.end:                                           ; preds = %if.end.loopexit, %for.body
-  %n.addr.2 = phi i32 [ %n.addr.021, %for.body ], [ %5, %if.end.loopexit ]
+  %n.addr.2 = phi i32 [ %n.addr.021, %for.body ], [ %4, %if.end.loopexit ]
   %s.1 = phi i32 [ %m, %for.body ], [ %add3, %if.end.loopexit ]
-  %x.addr.2 = phi i32* [ %x.addr.023, %for.body ], [ %scevgep, %if.end.loopexit ]
-  %arrayidx = getelementptr inbounds i32, i32* %z, i32 %a.022
-  store i32 %s.1, i32* %arrayidx, align 4
+  %x.addr.2 = phi ptr [ %x.addr.023, %for.body ], [ %scevgep, %if.end.loopexit ]
+  %arrayidx = getelementptr inbounds i32, ptr %z, i32 %a.022
+  store i32 %s.1, ptr %arrayidx, align 4
   %inc = add nuw nsw i32 %a.022, 1
   %exitcond.not = icmp eq i32 %inc, %m
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
@@ -151,7 +149,7 @@ declare i32 @llvm.arm.mve.addv.predicated.v4i32.v4i1(<4 x i32>, i32, <4 x i1>)
 
 
 ; Long test that was spilling lr between t2LoopDec and End
-define dso_local i32 @b(i32* %c, i32 %d, i32 %e, i32* %n) "frame-pointer"="all" {
+define dso_local i32 @b(ptr %c, i32 %d, i32 %e, ptr %n) "frame-pointer"="all" {
 ; CHECK-LABEL: b:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -238,26 +236,26 @@ define dso_local i32 @b(i32* %c, i32 %d, i32 %e, i32* %n) "frame-pointer"="all"
 ; CHECK-NEXT:    pop.w {r8, r9, r10, r11}
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %0 = inttoptr i32 %e to i32*
+  %0 = inttoptr i32 %e to ptr
   %tobool.not70 = icmp eq i32 %d, 0
   br i1 %tobool.not70, label %while.end, label %while.body
 
 while.body:                                       ; preds = %entry, %while.body
-  %p.077 = phi i32* [ %incdec.ptr22, %while.body ], [ inttoptr (i32 2 to i32*), %entry ]
-  %c.addr.076 = phi i32* [ %incdec.ptr1, %while.body ], [ %c, %entry ]
-  %n.075 = phi i32* [ %incdec.ptr43, %while.body ], [ %n, %entry ]
+  %p.077 = phi ptr [ %incdec.ptr22, %while.body ], [ inttoptr (i32 2 to ptr), %entry ]
+  %c.addr.076 = phi ptr [ %incdec.ptr1, %while.body ], [ %c, %entry ]
+  %n.075 = phi ptr [ %incdec.ptr43, %while.body ], [ %n, %entry ]
   %m.074 = phi i32 [ %conv35, %while.body ], [ undef, %entry ]
   %d.addr.073 = phi i32 [ %dec, %while.body ], [ %d, %entry ]
   %h.072 = phi i32 [ %conv41, %while.body ], [ undef, %entry ]
-  %incdec.ptr43 = getelementptr inbounds i32, i32* %n.075, i32 1
-  %1 = ptrtoint i32* %n.075 to i32
-  %2 = load i32, i32* %incdec.ptr43, align 4
-  %3 = load i32, i32* %c.addr.076, align 4
+  %incdec.ptr43 = getelementptr inbounds i32, ptr %n.075, i32 1
+  %1 = ptrtoint ptr %n.075 to i32
+  %2 = load i32, ptr %incdec.ptr43, align 4
+  %3 = load i32, ptr %c.addr.076, align 4
   %mul = mul nsw i32 %3, %1
   %conv = sext i32 %mul to i64
   %add = add nsw i64 %conv, 2147483648
-  %incdec.ptr1 = getelementptr inbounds i32, i32* %c.addr.076, i32 1
-  %4 = ptrtoint i32* %c.addr.076 to i32
+  %incdec.ptr1 = getelementptr inbounds i32, ptr %c.addr.076, i32 1
+  %4 = ptrtoint ptr %c.addr.076 to i32
   %mul2 = mul nsw i32 %2, %4
   %conv3 = sext i32 %mul2 to i64
   %add4 = add nsw i64 %conv3, 2147483648
@@ -266,7 +264,7 @@ while.body:                                       ; preds = %entry, %while.body
   %conv6 = ashr exact i64 %5, 32
   %conv7 = sext i32 %2 to i64
   %conv11 = sext i32 %h.072 to i64
-  %6 = load i32, i32* %incdec.ptr1, align 4
+  %6 = load i32, ptr %incdec.ptr1, align 4
   %mul12 = mul nsw i32 %6, %1
   %conv13 = sext i32 %mul12 to i64
   %add14 = add nuw nsw i64 %conv11, 2147483648
@@ -279,8 +277,8 @@ while.body:                                       ; preds = %entry, %while.body
   %sh_prom = zext i32 %2 to i64
   %shl = shl i64 %conv18, %sh_prom
   %conv21 = sext i32 %conv17 to i64
-  %incdec.ptr22 = getelementptr inbounds i32, i32* %p.077, i32 -1
-  %8 = load i32, i32* %p.077, align 4
+  %incdec.ptr22 = getelementptr inbounds i32, ptr %p.077, i32 -1
+  %8 = load i32, ptr %p.077, align 4
   %conv23 = sext i32 %8 to i64
   %conv24 = sext i32 %m.074 to i64
   %mul25 = mul nsw i64 %conv23, %conv24
@@ -288,7 +286,7 @@ while.body:                                       ; preds = %entry, %while.body
   %add26 = add nsw i64 %sub, %conv21
   %9 = shl i64 %shl, 30
   %conv27 = ashr i64 %9, 32
-  %10 = load i32, i32* %incdec.ptr22, align 4
+  %10 = load i32, ptr %incdec.ptr22, align 4
   %mul28 = mul nsw i32 %10, %m.074
   %add29 = add nsw i32 %mul28, 2
   %sh_prom30 = zext i32 %add29 to i64
@@ -297,8 +295,8 @@ while.body:                                       ; preds = %entry, %while.body
   %shr33 = ashr i64 %add26, %add32
   %11 = lshr i64 %shr33, 2
   %conv35 = trunc i64 %11 to i32
-  store i32 %conv35, i32* inttoptr (i32 2 to i32*), align 4
-  %12 = load i32, i32* %incdec.ptr22, align 4
+  store i32 %conv35, ptr inttoptr (i32 2 to ptr), align 4
+  %12 = load i32, ptr %incdec.ptr22, align 4
   %mul36 = mul nsw i32 %12, %2
   %sub37 = sub nsw i32 %conv17, %mul36
   %conv38 = sext i32 %sub37 to i64
@@ -306,8 +304,8 @@ while.body:                                       ; preds = %entry, %while.body
   %13 = lshr i64 %add39, 2
   %conv41 = trunc i64 %13 to i32
   %sub42 = sub nsw i32 0, %conv41
-  store i32 %sub42, i32* %0, align 4
-  store i32 %sub42, i32* %n.075, align 4
+  store i32 %sub42, ptr %0, align 4
+  store i32 %sub42, ptr %n.075, align 4
   %dec = add nsw i32 %d.addr.073, -1
   %tobool.not = icmp eq i32 %dec, 0
   br i1 %tobool.not, label %while.end, label %while.body
@@ -317,7 +315,7 @@ while.end:                                        ; preds = %while.body, %entry
 }
 
 declare void @callee()
-define void @callinpreheader(i32* noalias nocapture readonly %pAngle, i32* nocapture %pDst, i32 %size) {
+define void @callinpreheader(ptr noalias nocapture readonly %pAngle, ptr nocapture %pDst, i32 %size) {
 ; CHECK-LABEL: callinpreheader:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -350,8 +348,8 @@ for.body.ph:
 for.body:
   %i.09 = phi i32 [ %inc, %for.body ], [ 0, %for.body.ph ]
   %s.08 = phi i32 [ %add, %for.body ], [ 0, %for.body.ph ]
-  %arrayidx = getelementptr inbounds i32, i32* %pAngle, i32 %i.09
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %pAngle, i32 %i.09
+  %0 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %0, %s.08
   %inc = add nuw nsw i32 %i.09, 1
   %exitcond.not = icmp eq i32 %inc, %size
@@ -359,6 +357,6 @@ for.body:
 
 for.cond.cleanup:
   %s.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
-  store i32 %s.0.lcssa, i32* %pDst, align 4
+  store i32 %s.0.lcssa, ptr %pDst, align 4
   ret void
 }

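The hunks above show the most common shape of this conversion: the bitcast
that used to turn an i32* into a <4 x i32>* before each vector load simply
disappears, since every address-space-0 pointer is now the single type
"ptr", and the unnamed values (%1, %2, ...) renumber accordingly. A minimal
standalone sketch of the new form (the function name @load_vec is
illustrative, not taken from any test in this commit):

define <4 x i32> @load_vec(ptr %x) {
entry:
  ; Illustrative only. The load takes the ptr operand directly; under
  ; typed pointers this needed "%1 = bitcast i32* %x to <4 x i32>*" first.
  %v = load <4 x i32>, ptr %x, align 4
  ret <4 x i32> %v
}
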
diff --git a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
index a5f9c511e0680..3ce79225cd5e6 100644
--- a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
+++ b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
@@ -323,7 +323,7 @@ define <16 x i8> @v16i8(i32 %index, i32 %TC, <16 x i8> %V1, <16 x i8> %V2) {
   ret <16 x i8> %select
 }
 
-define void @test_width2(i32* nocapture readnone %x, i32* nocapture %y, i8 zeroext %m) {
+define void @test_width2(ptr nocapture readnone %x, ptr nocapture %y, i8 zeroext %m) {
 ; CHECK-LABEL: test_width2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r7, lr}
@@ -394,15 +394,13 @@ vector.body:                                      ; preds = %vector.body, %for.b
   %index = phi i32 [ 0, %for.body.preheader ], [ %index.next, %vector.body ]
   %active.lane.mask = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 %index, i32 %wide.trip.count)
   %0 = add nsw i32 %index, -2
-  %1 = getelementptr inbounds i32, i32* %y, i32 %0
-  %2 = bitcast i32* %1 to <2 x i32>*
-  %wide.masked.load = call <2 x i32> @llvm.masked.load.v2i32.p0v2i32(<2 x i32>* %2, i32 4, <2 x i1> %active.lane.mask, <2 x i32> undef)
-  %3 = getelementptr inbounds i32, i32* %y, i32 %index
-  %4 = bitcast i32* %3 to <2 x i32>*
-  call void @llvm.masked.store.v2i32.p0v2i32(<2 x i32> %wide.masked.load, <2 x i32>* %4, i32 4, <2 x i1> %active.lane.mask)
+  %1 = getelementptr inbounds i32, ptr %y, i32 %0
+  %wide.masked.load = call <2 x i32> @llvm.masked.load.v2i32.p0(ptr %1, i32 4, <2 x i1> %active.lane.mask, <2 x i32> undef)
+  %2 = getelementptr inbounds i32, ptr %y, i32 %index
+  call void @llvm.masked.store.v2i32.p0(<2 x i32> %wide.masked.load, ptr %2, i32 4, <2 x i1> %active.lane.mask)
   %index.next = add i32 %index, 2
-  %5 = icmp eq i32 %index.next, %n.vec
-  br i1 %5, label %for.cond.cleanup, label %vector.body
+  %3 = icmp eq i32 %index.next, %n.vec
+  br i1 %3, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -413,5 +411,5 @@ declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
 declare <7 x i1> @llvm.get.active.lane.mask.v7i1.i32(i32, i32)
 declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
 declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
-declare <2 x i32> @llvm.masked.load.v2i32.p0v2i32(<2 x i32>*, i32, <2 x i1>, <2 x i32>)
-declare void @llvm.masked.store.v2i32.p0v2i32(<2 x i32>, <2 x i32>*, i32, <2 x i1>)
+declare <2 x i32> @llvm.masked.load.v2i32.p0(ptr, i32, <2 x i1>, <2 x i32>)
+declare void @llvm.masked.store.v2i32.p0(<2 x i32>, ptr, i32, <2 x i1>)

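Note how the intrinsic declarations above change: pointer-typed overloads
are now mangled only by address space (".p0") instead of by pointee type
(".p0v2i32"). A minimal sketch of a call under the new mangling (the
function name @masked_load_example and the zeroinitializer passthru are
illustrative, not from the commit):

declare <2 x i32> @llvm.masked.load.v2i32.p0(ptr, i32, <2 x i1>, <2 x i32>)

define <2 x i32> @masked_load_example(ptr %p, <2 x i1> %mask) {
entry:
  ; Illustrative only. The suffix encodes the result type (v2i32) and the
  ; pointer's address space (p0); the pointee type no longer participates.
  %v = call <2 x i32> @llvm.masked.load.v2i32.p0(ptr %p, i32 4, <2 x i1> %mask, <2 x i32> zeroinitializer)
  ret <2 x i32> %v
}
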
diff --git a/llvm/test/CodeGen/Thumb2/aligned-constants.ll b/llvm/test/CodeGen/Thumb2/aligned-constants.ll
index 27eaf4744beb4..04bdcdfbf80ed 100644
--- a/llvm/test/CodeGen/Thumb2/aligned-constants.ll
+++ b/llvm/test/CodeGen/Thumb2/aligned-constants.ll
@@ -14,13 +14,13 @@ target triple = "thumbv7-apple-ios"
 ; CHECK:	.long	1080815255
 ; CHECK: LCPI
 ; CHECK:	.long	0x42f6e979
-define void @func(float* nocapture %x, double* nocapture %y) nounwind ssp {
+define void @func(ptr nocapture %x, ptr nocapture %y) nounwind ssp {
 entry:
-  %0 = load float, float* %x, align 4
+  %0 = load float, ptr %x, align 4
   %add = fadd float %0, 0x405EDD2F20000000
-  store float %add, float* %x, align 4
-  %1 = load double, double* %y, align 4
+  store float %add, ptr %x, align 4
+  %1 = load double, ptr %y, align 4
   %add1 = fadd double %1, 2.234560e+02
-  store double %add1, double* %y, align 4
+  store double %add1, ptr %y, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll b/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll
index 67684a8930616..7a9b3a5990c60 100644
--- a/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll
+++ b/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1-m.main-none-eabi -mcpu=cortex-m55 -O3 < %s | FileCheck %s
 
-define i32 @loop(i32* nocapture readonly %x) {
+define i32 @loop(ptr nocapture readonly %x) {
 ; CHECK-LABEL: loop:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -26,15 +26,15 @@ for.cond.cleanup:                                 ; preds = %for.body
 for.body:                                         ; preds = %entry, %for.body
   %i.07 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %s.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.07
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %x, i32 %i.07
+  %0 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %0, %s.06
   %inc = add nuw nsw i32 %i.07, 1
   %exitcond.not = icmp eq i32 %inc, 500
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
 }
 
-define i64 @loopif(i32* nocapture readonly %x, i32 %y, i32 %n) {
+define i64 @loopif(ptr nocapture readonly %x, i32 %y, i32 %n) {
 ; CHECK-LABEL: loopif:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -77,8 +77,8 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.09 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
   %s.08 = phi i64 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.09
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %x, i32 %i.09
+  %0 = load i32, ptr %arrayidx, align 4
   %conv = sext i32 %0 to i64
   %mul = mul nsw i64 %conv, %conv1
   %add = add nsw i64 %mul, %s.08

diff --git a/llvm/test/CodeGen/Thumb2/aligned-spill.ll b/llvm/test/CodeGen/Thumb2/aligned-spill.ll
index 1e88f821235bf..7318d201053dc 100644
--- a/llvm/test/CodeGen/Thumb2/aligned-spill.ll
+++ b/llvm/test/CodeGen/Thumb2/aligned-spill.ll
@@ -11,12 +11,12 @@ target triple = "thumbv7-apple-ios"
 ; CHECK: push {r4, r7, lr}
 ; CHECK: bfc r4, #0, #3
 ; CHECK: mov sp, r4
-define void @f(double* nocapture %p) nounwind ssp "frame-pointer"="all" {
+define void @f(ptr nocapture %p) nounwind ssp "frame-pointer"="all" {
 entry:
-  %0 = load double, double* %p, align 4
+  %0 = load double, ptr %p, align 4
   tail call void asm sideeffect "", "~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwind
   tail call void @g() nounwind
-  store double %0, double* %p, align 4
+  store double %0, ptr %p, align 4
   ret void
 }
 
@@ -45,7 +45,7 @@ entry:
 declare void @g()
 
 ; Spill 7 d-registers.
-define void @f7(double* nocapture %p) nounwind ssp "frame-pointer"="all" {
+define void @f7(ptr nocapture %p) nounwind ssp "frame-pointer"="all" {
 entry:
   tail call void asm sideeffect "", "~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14}"() nounwind
   ret void
@@ -69,7 +69,7 @@ entry:
 ; NEON: pop
 
 ; Spill 7 d-registers, leave a hole.
-define void @f3plus4(double* nocapture %p) nounwind ssp "frame-pointer"="all" {
+define void @f3plus4(ptr nocapture %p) nounwind ssp "frame-pointer"="all" {
 entry:
   tail call void asm sideeffect "", "~{d8},~{d9},~{d10},~{d12},~{d13},~{d14},~{d15}"() nounwind
   ret void

diff --git a/llvm/test/CodeGen/Thumb2/bfi.ll b/llvm/test/CodeGen/Thumb2/bfi.ll
index 337f46a227489..4647d329e787c 100644
--- a/llvm/test/CodeGen/Thumb2/bfi.ll
+++ b/llvm/test/CodeGen/Thumb2/bfi.ll
@@ -2,17 +2,17 @@
 
 %struct.F = type { [3 x i8], i8 }
 
-@X = common global %struct.F zeroinitializer, align 4 ; <%struct.F*> [#uses=1]
+@X = common global %struct.F zeroinitializer, align 4 ; <ptr> [#uses=1]
 
 define void @f1([1 x i32] %f.coerce0) nounwind {
 entry:
 ; CHECK: f1
 ; CHECK: movs r2, #10
 ; CHECK: bfi r1, r2, #22, #4
-  %0 = load i32, i32* bitcast (%struct.F* @X to i32*), align 4 ; <i32> [#uses=1]
+  %0 = load i32, ptr @X, align 4 ; <i32> [#uses=1]
   %1 = and i32 %0, -62914561                      ; <i32> [#uses=1]
   %2 = or i32 %1, 41943040                        ; <i32> [#uses=1]
-  store i32 %2, i32* bitcast (%struct.F* @X to i32*), align 4
+  store i32 %2, ptr @X, align 4
   ret void
 }
 

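The bfi.ll hunk shows a second recurring simplification: constant-expression
bitcasts of globals fold away, because a global like @X already has type
ptr. A standalone sketch (the function name @first_word is illustrative,
not from the commit):

%struct.F = type { [3 x i8], i8 }
@X = global %struct.F zeroinitializer, align 4

define i32 @first_word() {
entry:
  ; Illustrative only. The old "bitcast (%struct.F* @X to i32*)" wrapper
  ; is gone; the i32 load names @X directly.
  %w = load i32, ptr @X, align 4
  ret i32 %w
}
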
diff --git a/llvm/test/CodeGen/Thumb2/bicbfi.ll b/llvm/test/CodeGen/Thumb2/bicbfi.ll
index 5f51f8c46f806..91f64e16dc8f1 100644
--- a/llvm/test/CodeGen/Thumb2/bicbfi.ll
+++ b/llvm/test/CodeGen/Thumb2/bicbfi.ll
@@ -5,13 +5,13 @@ target triple = "thumbv7--linux-gnueabihf"
 
 ; CHECK-LABEL: f:
 ; CHECK: bic
-define void @f(i32* nocapture %b, i32* nocapture %c, i32 %a) {
+define void @f(ptr nocapture %b, ptr nocapture %c, i32 %a) {
   %1 = and i32 %a, -4096
-  store i32 %1, i32* %c, align 4
+  store i32 %1, ptr %c, align 4
   %2 = and i32 %a, 4095
   %3 = or i32 %2, 4096
-  %4 = load i32, i32* %b, align 4
+  %4 = load i32, ptr %b, align 4
   %5 = add nsw i32 %4, %3
-  store i32 %5, i32* %b, align 4
+  store i32 %5, ptr %b, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/bti-indirect-branches.ll b/llvm/test/CodeGen/Thumb2/bti-indirect-branches.ll
index e89a8f40016a0..e238c67174c10 100644
--- a/llvm/test/CodeGen/Thumb2/bti-indirect-branches.ll
+++ b/llvm/test/CodeGen/Thumb2/bti-indirect-branches.ll
@@ -59,7 +59,7 @@ return:
   ret i32 %ret
 }
 
-@computed_goto_cases = private unnamed_addr constant [2 x i8*] [i8* blockaddress(@computed_goto, %return), i8* blockaddress(@computed_goto, %case_1)], align 4
+@computed_goto_cases = private unnamed_addr constant [2 x ptr] [ptr blockaddress(@computed_goto, %return), ptr blockaddress(@computed_goto, %case_1)], align 4
 
 define internal i32 @computed_goto(i32 %x) {
 ; CHECK-LABEL: computed_goto:
@@ -80,9 +80,9 @@ define internal i32 @computed_goto(i32 %x) {
 ; CHECK-NEXT:    movs r0, #1
 ; CHECK-NEXT:    bx lr
 entry:
-  %arrayidx = getelementptr inbounds [2 x i8*], [2 x i8*]* @computed_goto_cases, i32 0, i32 %x
-  %0 = load i8*, i8** %arrayidx, align 4
-  indirectbr i8* %0, [label %return, label %case_1]
+  %arrayidx = getelementptr inbounds [2 x ptr], ptr @computed_goto_cases, i32 0, i32 %x
+  %0 = load ptr, ptr %arrayidx, align 4
+  indirectbr ptr %0, [label %return, label %case_1]
 
 case_1:
   br label %return
@@ -93,10 +93,10 @@ return:
 }
 
 declare void @may_throw()
-declare void @consume_exception(i8*)
+declare void @consume_exception(ptr)
 declare i32 @__gxx_personality_v0(...)
 
-define internal i32 @exception_handling(i32 %0) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define internal i32 @exception_handling(i32 %0) personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: exception_handling:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    bti
@@ -119,10 +119,10 @@ entry:
           to label %return unwind label %lpad
 
 lpad:
-  %1 = landingpad { i8*, i32 }
-          catch i8* null
-  %2 = extractvalue { i8*, i32 } %1, 0
-  call void @consume_exception(i8* %2)
+  %1 = landingpad { ptr, i32 }
+          catch ptr null
+  %2 = extractvalue { ptr, i32 } %1, 0
+  call void @consume_exception(ptr %2)
   br label %return
 
 return:

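The personality change above is the same folding applied to function
pointers: "bitcast (i32 (...)* @__gxx_personality_v0 to i8*)" collapses to
a bare reference, since function pointers are also just ptr. A minimal
sketch (the function name @has_personality is illustrative, not from the
commit):

declare i32 @__gxx_personality_v0(...)

define void @has_personality() personality ptr @__gxx_personality_v0 {
entry:
  ; Illustrative only. No constant bitcast is needed to attach the
  ; personality routine once function pointers are plain ptr.
  ret void
}
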
diff --git a/llvm/test/CodeGen/Thumb2/bti-outliner-1.ll b/llvm/test/CodeGen/Thumb2/bti-outliner-1.ll
index 03bdf454b8553..26005a44d8aaa 100644
--- a/llvm/test/CodeGen/Thumb2/bti-outliner-1.ll
+++ b/llvm/test/CodeGen/Thumb2/bti-outliner-1.ll
@@ -45,16 +45,16 @@ define hidden i32 @x(i32 %p) local_unnamed_addr #0 {
 ; CHECK-NEXT:  .LCPI0_0:
 ; CHECK-NEXT:    .long .L_MergedGlobals
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
+  %4 = load volatile i32, ptr @e, align 4
   %mul = mul nsw i32 %4, %div
-  %5 = load volatile i32, i32* @f, align 4
+  %5 = load volatile i32, ptr @f, align 4
   %add2 = add nsw i32 %mul, %5
   %add3 = add nsw i32 %add2, 1
   ret i32 %add3
@@ -75,16 +75,16 @@ define hidden i32 @y(i32 %p) local_unnamed_addr #1 {
 ; CHECK-NEXT:  .LCPI1_0:
 ; CHECK-NEXT:    .long .L_MergedGlobals
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
+  %4 = load volatile i32, ptr @e, align 4
   %mul = mul nsw i32 %4, %div
-  %5 = load volatile i32, i32* @f, align 4
+  %5 = load volatile i32, ptr @f, align 4
   %add2 = add nsw i32 %mul, %5
   %add3 = add nsw i32 %add2, 2
   ret i32 %add3
@@ -112,16 +112,16 @@ define hidden i32 @z(i32 %p) local_unnamed_addr #2 {
 ; CHECK-NEXT:  .LCPI2_0:
 ; CHECK-NEXT:    .long .L_MergedGlobals
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
+  %4 = load volatile i32, ptr @e, align 4
   %mul = mul nsw i32 %4, %div
-  %5 = load volatile i32, i32* @f, align 4
+  %5 = load volatile i32, ptr @f, align 4
   %add2 = add nsw i32 %mul, %5
   %add3 = add nsw i32 %add2, 3
   ret i32 %add3

diff --git a/llvm/test/CodeGen/Thumb2/bti-outliner-2.ll b/llvm/test/CodeGen/Thumb2/bti-outliner-2.ll
index f5d2d393040c0..5492b96439a61 100644
--- a/llvm/test/CodeGen/Thumb2/bti-outliner-2.ll
+++ b/llvm/test/CodeGen/Thumb2/bti-outliner-2.ll
@@ -12,16 +12,16 @@
 
 define hidden i32 @x(i32 %p) local_unnamed_addr #0 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
+  %4 = load volatile i32, ptr @e, align 4
   %mul = mul nsw i32 %4, %div
-  %5 = load volatile i32, i32* @f, align 4
+  %5 = load volatile i32, ptr @f, align 4
   %add2 = add nsw i32 %mul, %5
   %add3 = add nsw i32 %add2, 1
   ret i32 %add3
@@ -32,16 +32,16 @@ entry:
 
 define hidden i32 @y(i32 %p) local_unnamed_addr #1 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
+  %4 = load volatile i32, ptr @e, align 4
   %mul = mul nsw i32 %4, %div
-  %5 = load volatile i32, i32* @f, align 4
+  %5 = load volatile i32, ptr @f, align 4
   %add2 = add nsw i32 %mul, %5
   %add3 = add nsw i32 %add2, 2
   ret i32 %add3
@@ -52,16 +52,16 @@ entry:
 
 define hidden i32 @z(i32 %p) local_unnamed_addr #2 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
+  %4 = load volatile i32, ptr @e, align 4
   %mul = mul nsw i32 %4, %div
-  %5 = load volatile i32, i32* @f, align 4
+  %5 = load volatile i32, ptr @f, align 4
   %add2 = add nsw i32 %mul, %5
   %add3 = add nsw i32 %add2, 3
   ret i32 %add3

diff --git a/llvm/test/CodeGen/Thumb2/bti-outliner-cost-1.ll b/llvm/test/CodeGen/Thumb2/bti-outliner-cost-1.ll
index 85358e694e20e..5f2c9fc32972d 100644
--- a/llvm/test/CodeGen/Thumb2/bti-outliner-cost-1.ll
+++ b/llvm/test/CodeGen/Thumb2/bti-outliner-cost-1.ll
@@ -25,14 +25,14 @@
 
 define hidden i32 @x(i32 %p) local_unnamed_addr #0 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
+  %4 = load volatile i32, ptr @e, align 4
   %mul = mul nsw i32 %4, %div
   %add2 = add nsw i32 %mul, 1
   ret i32 %add2
@@ -42,14 +42,14 @@ entry:
 
 define hidden i32 @y(i32 %p) local_unnamed_addr #0 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
+  %4 = load volatile i32, ptr @e, align 4
   %mul = mul nsw i32 %4, %div
   %add2 = add nsw i32 %mul, 2
   ret i32 %add2

diff --git a/llvm/test/CodeGen/Thumb2/bti-outliner-cost-2.ll b/llvm/test/CodeGen/Thumb2/bti-outliner-cost-2.ll
index 376dc3a31068a..cf1d8170e37eb 100644
--- a/llvm/test/CodeGen/Thumb2/bti-outliner-cost-2.ll
+++ b/llvm/test/CodeGen/Thumb2/bti-outliner-cost-2.ll
@@ -10,14 +10,14 @@
 
 define hidden i32 @x(i32 %p) local_unnamed_addr #0 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
+  %4 = load volatile i32, ptr @e, align 4
   %mul = mul nsw i32 %4, %div
   %add2 = add nsw i32 %mul, 1
   ret i32 %add2
@@ -27,14 +27,14 @@ entry:
 
 define hidden i32 @y(i32 %p) local_unnamed_addr #0 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
+  %4 = load volatile i32, ptr @e, align 4
   %mul = mul nsw i32 %4, %div
   %add2 = add nsw i32 %mul, 2
   ret i32 %add2

diff --git a/llvm/test/CodeGen/Thumb2/bug-subw.ll b/llvm/test/CodeGen/Thumb2/bug-subw.ll
index a9ca37e753b71..41d96116c67ac 100644
--- a/llvm/test/CodeGen/Thumb2/bug-subw.ll
+++ b/llvm/test/CodeGen/Thumb2/bug-subw.ll
@@ -5,65 +5,58 @@
 ; TODO: Missed optimization. The three instructions generated to subtract SP can be converged to a single one
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32"
 target triple = "thumbv7m-unknown-unknown"
-%B = type {%B*}
+%B = type {ptr}
 %R = type {i32}
-%U = type {%U*, i8, i8}
-%E = type {%B*, %U*}
+%U = type {ptr, i8, i8}
+%E = type {ptr, ptr}
 %X = type {i32, i8, i8}
-declare external [0 x i8]* @memalloc(i32, i32, i32)
-declare external void @memfree([0 x i8]*, i32, i32)
-define void @foo(%B* %pb$, %R* %pr$) nounwind {
+declare external ptr @memalloc(i32, i32, i32)
+declare external void @memfree(ptr, i32, i32)
+define void @foo(ptr %pb$, ptr %pr$) nounwind {
 L.0:
-	%pb = alloca %B*
-	%pr = alloca %R*
-	store %B* %pb$, %B** %pb
-	store %R* %pr$, %R** %pr
-	%pe = alloca %E*
-	%0 = load %B*, %B** %pb
-	%1 = bitcast %B* %0 to %E*
-	store %E* %1, %E** %pe
-	%2 = load %R*, %R** %pr
-	%3 = getelementptr %R, %R* %2, i32 0, i32 0
-	%4 = load i32, i32* %3
-	switch i32 %4, label %L.1 [
+	%pb = alloca ptr
+	%pr = alloca ptr
+	store ptr %pb$, ptr %pb
+	store ptr %pr$, ptr %pr
+	%pe = alloca ptr
+	%0 = load ptr, ptr %pb
+	store ptr %0, ptr %pe
+	%1 = load ptr, ptr %pr
+	%2 = load i32, ptr %1
+	switch i32 %2, label %L.1 [
 		i32 1, label %L.3
 	]
 L.3:
-	%px = alloca %X*
-	%5 = load %R*, %R** %pr
-	%6 = bitcast %R* %5 to %X*
-	store %X* %6, %X** %px
-	%7 = load %X*, %X** %px
-	%8 = getelementptr %X, %X* %7, i32 0, i32 0
-	%9 = load i32, i32* %8
-	%10 = icmp ne i32 %9, 0
-	br i1 %10, label %L.5, label %L.4
+	%px = alloca ptr
+	%3 = load ptr, ptr %pr
+	store ptr %3, ptr %px
+	%4 = load ptr, ptr %px
+	%5 = load i32, ptr %4
+	%6 = icmp ne i32 %5, 0
+	br i1 %6, label %L.5, label %L.4
 L.5:
-	%pu = alloca %U*
-	%11 = call [0 x i8]* @memalloc(i32 8, i32 4, i32 0)
-	%12 = bitcast [0 x i8]* %11 to %U*
-	store %U* %12, %U** %pu
-	%13 = load %X*, %X** %px
-	%14 = getelementptr %X, %X* %13, i32 0, i32 1
-	%15 = load i8, i8* %14
-	%16 = load %U*, %U** %pu
-	%17 = getelementptr %U, %U* %16, i32 0, i32 1
-	store i8 %15, i8* %17
-	%18 = load %E*, %E** %pe
-	%19 = getelementptr %E, %E* %18, i32 0, i32 1
-	%20 = load %U*, %U** %19
-	%21 = load %U*, %U** %pu
-	%22 = getelementptr %U, %U* %21, i32 0, i32 0
-	store %U* %20, %U** %22
-	%23 = load %U*, %U** %pu
-	%24 = load %E*, %E** %pe
-	%25 = getelementptr %E, %E* %24, i32 0, i32 1
-	store %U* %23, %U** %25
+	%pu = alloca ptr
+	%7 = call ptr @memalloc(i32 8, i32 4, i32 0)
+	store ptr %7, ptr %pu
+	%8 = load ptr, ptr %px
+	%9 = getelementptr %X, ptr %8, i32 0, i32 1
+	%10 = load i8, ptr %9
+	%11 = load ptr, ptr %pu
+	%12 = getelementptr %U, ptr %11, i32 0, i32 1
+	store i8 %10, ptr %12
+	%13 = load ptr, ptr %pe
+	%14 = getelementptr %E, ptr %13, i32 0, i32 1
+	%15 = load ptr, ptr %14
+	%16 = load ptr, ptr %pu
+	store ptr %15, ptr %16
+	%17 = load ptr, ptr %pu
+	%18 = load ptr, ptr %pe
+	%19 = getelementptr %E, ptr %18, i32 0, i32 1
+	store ptr %17, ptr %19
 	br label %L.4
 L.4:
-	%26 = load %X*, %X** %px
-	%27 = bitcast %X* %26 to [0 x i8]*
-	call void @memfree([0 x i8]* %27, i32 8, i32 0)
+	%20 = load ptr, ptr %px
+	call void @memfree(ptr %20, i32 8, i32 0)
 	br label %L.2
 L.1:
 	br label %L.2

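Beyond the plain type rewrites, the bug-subw.ll hunk also drops
instructions that become no-ops: allocas of pointer types collapse to
"alloca ptr", pointer-to-pointer bitcasts vanish, and a getelementptr with
all-zero indices into the first field computes the same address as its
operand. A standalone sketch of the last case (the function name
@first_field is illustrative, not from the commit):

%R = type { i32 }

define i32 @first_field(ptr %pr) {
entry:
  ; Illustrative only. "getelementptr %R, ptr %pr, i32 0, i32 0" would
  ; return %pr unchanged, so the load uses %pr directly.
  %v = load i32, ptr %pr, align 4
  ret i32 %v
}
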
diff --git a/llvm/test/CodeGen/Thumb2/buildvector-crash.ll b/llvm/test/CodeGen/Thumb2/buildvector-crash.ll
index 16e2298522f54..877885a5696bc 100644
--- a/llvm/test/CodeGen/Thumb2/buildvector-crash.ll
+++ b/llvm/test/CodeGen/Thumb2/buildvector-crash.ll
@@ -10,7 +10,7 @@ bb8:                                              ; preds = %bb8, %bb.nph372
   %1 = fmul <4 x float> %0, undef
   %2 = fmul <4 x float> %1, undef
   %3 = fadd <4 x float> undef, %2
-  store <4 x float> %3, <4 x float>* undef, align 4
+  store <4 x float> %3, ptr undef, align 4
   br label %bb8
 ; CHECK-LABEL: RotateStarsFP_Vec:
 ; CHECK: vld1.64

diff --git a/llvm/test/CodeGen/Thumb2/call-site-info-update.ll b/llvm/test/CodeGen/Thumb2/call-site-info-update.ll
index d21ec61114c57..3666cd07c1857 100644
--- a/llvm/test/CodeGen/Thumb2/call-site-info-update.ll
+++ b/llvm/test/CodeGen/Thumb2/call-site-info-update.ll
@@ -13,70 +13,62 @@ target triple = "thumbv7-unknown-linux-gnueabi"
 @l = dso_local local_unnamed_addr global i8 0, align 1, !dbg !0
 
 ; Function Attrs: nounwind
-define dso_local zeroext i1 @_ZN1z2agEv(%class.z* nocapture readonly %this) local_unnamed_addr align 2 !dbg !17 {
+define dso_local zeroext i1 @_ZN1z2agEv(ptr nocapture readonly %this) local_unnamed_addr align 2 !dbg !17 {
 entry:
   %a = alloca %struct.y, align 1
   %b = alloca %struct.y, align 1
   %c = alloca %class.ae, align 1
-  call void @llvm.dbg.value(metadata %class.z* %this, metadata !28, metadata !DIExpression()), !dbg !75
-  %s = getelementptr inbounds %class.z, %class.z* %this, i32 0, i32 0, !dbg !75
-  %0 = load i8, i8* %s, align 1, !dbg !75
+  call void @llvm.dbg.value(metadata ptr %this, metadata !28, metadata !DIExpression()), !dbg !75
+  %0 = load i8, ptr %this, align 1, !dbg !75
   %tobool = icmp eq i8 %0, 0, !dbg !75
   br i1 %tobool, label %if.end, label %if.then, !dbg !75
 
 if.then:                                          ; preds = %entry
-  %1 = getelementptr inbounds %struct.y, %struct.y* %a, i32 0, i32 0, i32 0, i32 0, !dbg !82
-  %u.i = getelementptr inbounds %struct.y, %struct.y* %a, i32 0, i32 0, !dbg !94
-  %n.i.i = getelementptr inbounds %struct.y, %struct.y* %a, i32 0, i32 0, i32 0, !dbg !82
-  %call.i.i = call %"struct.ac::m"* @_ZN2ac1mC1EPc(%"struct.ac::m"* nonnull %n.i.i, i8* null), !dbg !82
-  %call2.i.i = call i8* @_ZN2ac2adEv(%class.ac* nonnull %u.i), !dbg !82
-  %cmp.i.i = icmp eq i8* %call2.i.i, null, !dbg !82
+  %call.i.i = call ptr @_ZN2ac1mC1EPc(ptr nonnull %a, ptr null), !dbg !82
+  %call2.i.i = call ptr @_ZN2ac2adEv(ptr nonnull %a), !dbg !82
+  %cmp.i.i = icmp eq ptr %call2.i.i, null, !dbg !82
   %frombool.i.i = zext i1 %cmp.i.i to i8, !dbg !82
-  store i8 %frombool.i.i, i8* @l, align 1, !dbg !82
+  store i8 %frombool.i.i, ptr @l, align 1, !dbg !82
   br i1 %cmp.i.i, label %_ZN1yC2Ev.exit, label %if.then.i.i, !dbg !82
 
 if.then.i.i:                                      ; preds = %if.then
   call void @llvm.dbg.value(metadata i32 1, metadata !144, metadata !DIExpression()), !dbg !145
-  call void @_ZdlPv(i8* null), !dbg !145
+  call void @_ZdlPv(ptr null), !dbg !145
   br label %_ZN1yC2Ev.exit, !dbg !145
 
 _ZN1yC2Ev.exit:                                   ; preds = %if.then, %if.then.i.i
   call void @llvm.dbg.value(metadata i8 1, metadata !31, metadata !DIExpression()), !dbg !75
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %1), !dbg !75
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %a), !dbg !75
   br label %cleanup
 
 if.end:                                           ; preds = %entry
-  %ah = getelementptr inbounds %class.z, %class.z* %this, i32 0, i32 1, !dbg !150
-  %2 = load i8, i8* %ah, align 1, !dbg !150
-  %tobool3 = icmp eq i8 %2, 0, !dbg !150
+  %ah = getelementptr inbounds %class.z, ptr %this, i32 0, i32 1, !dbg !150
+  %1 = load i8, ptr %ah, align 1, !dbg !150
+  %tobool3 = icmp eq i8 %1, 0, !dbg !150
   br i1 %tobool3, label %if.end7, label %if.then4, !dbg !150
 
 if.then4:                                         ; preds = %if.end
-  %3 = getelementptr inbounds %struct.y, %struct.y* %b, i32 0, i32 0, i32 0, i32 0, !dbg !153
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %3), !dbg !153
-  %u.i11 = getelementptr inbounds %struct.y, %struct.y* %b, i32 0, i32 0, !dbg !153
-  %n.i.i12 = getelementptr inbounds %struct.y, %struct.y* %b, i32 0, i32 0, i32 0, !dbg !153
-  %call.i.i13 = call %"struct.ac::m"* @_ZN2ac1mC1EPc(%"struct.ac::m"* nonnull %n.i.i12, i8* null), !dbg !153
-  %call2.i.i14 = call i8* @_ZN2ac2adEv(%class.ac* nonnull %u.i11), !dbg !153
-  %cmp.i.i15 = icmp eq i8* %call2.i.i14, null, !dbg !153
+  call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %b), !dbg !153
+  %call.i.i13 = call ptr @_ZN2ac1mC1EPc(ptr nonnull %b, ptr null), !dbg !153
+  %call2.i.i14 = call ptr @_ZN2ac2adEv(ptr nonnull %b), !dbg !153
+  %cmp.i.i15 = icmp eq ptr %call2.i.i14, null, !dbg !153
   %frombool.i.i16 = zext i1 %cmp.i.i15 to i8, !dbg !153
-  store i8 %frombool.i.i16, i8* @l, align 1, !dbg !153
+  store i8 %frombool.i.i16, ptr @l, align 1, !dbg !153
   br i1 %cmp.i.i15, label %_ZN1yC2Ev.exit18, label %if.then.i.i17, !dbg !153
 
 if.then.i.i17:                                    ; preds = %if.then4
-  call void @_ZdlPv(i8* null), !dbg !170
+  call void @_ZdlPv(ptr null), !dbg !170
   br label %_ZN1yC2Ev.exit18, !dbg !170
 
 _ZN1yC2Ev.exit18:                                 ; preds = %if.then4, %if.then.i.i17
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %3), !dbg !173
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %b), !dbg !173
   br label %cleanup
 
 if.end7:                                          ; preds = %if.end
-  %4 = getelementptr inbounds %class.ae, %class.ae* %c, i32 0, i32 0, !dbg !173
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %4), !dbg !173
-  %call8 = call %class.ae* @_ZN2aeC1Ei(%class.ae* nonnull %c, i32 1), !dbg !173
-  call void @_ZN2ae1xES_(%class.ae* nonnull %c, [1 x i32] zeroinitializer), !dbg !173
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %4), !dbg !173
+  call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %c), !dbg !173
+  %call8 = call ptr @_ZN2aeC1Ei(ptr nonnull %c, i32 1), !dbg !173
+  call void @_ZN2ae1xES_(ptr nonnull %c, [1 x i32] zeroinitializer), !dbg !173
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %c), !dbg !173
   br label %cleanup
 
 cleanup:                                          ; preds = %if.end7, %_ZN1yC2Ev.exit18, %_ZN1yC2Ev.exit
@@ -85,21 +77,21 @@ cleanup:                                          ; preds = %if.end7, %_ZN1yC2Ev
 }
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
-declare dso_local %class.ae* @_ZN2aeC1Ei(%class.ae* returned, i32) unnamed_addr
+declare dso_local ptr @_ZN2aeC1Ei(ptr returned, i32) unnamed_addr
 
-declare dso_local void @_ZN2ae1xES_(%class.ae*, [1 x i32]) local_unnamed_addr
+declare dso_local void @_ZN2ae1xES_(ptr, [1 x i32]) local_unnamed_addr
 
-declare dso_local %"struct.ac::m"* @_ZN2ac1mC1EPc(%"struct.ac::m"* returned, i8*) unnamed_addr
+declare dso_local ptr @_ZN2ac1mC1EPc(ptr returned, ptr) unnamed_addr
 
-declare dso_local i8* @_ZN2ac2adEv(%class.ac*) local_unnamed_addr
+declare dso_local ptr @_ZN2ac2adEv(ptr) local_unnamed_addr
 
 ; Function Attrs: nobuiltin nounwind
-declare !dbg !6 dso_local void @_ZdlPv(i8*) local_unnamed_addr
+declare !dbg !6 dso_local void @_ZdlPv(ptr) local_unnamed_addr
 
 ; Function Attrs: nounwind readnone speculatable willreturn
 declare void @llvm.dbg.value(metadata, metadata, metadata)

diff --git a/llvm/test/CodeGen/Thumb2/cmp-frame.ll b/llvm/test/CodeGen/Thumb2/cmp-frame.ll
index ceb9697ecb890..bbf6a278c7985 100644
--- a/llvm/test/CodeGen/Thumb2/cmp-frame.ll
+++ b/llvm/test/CodeGen/Thumb2/cmp-frame.ll
@@ -3,9 +3,9 @@
 target triple = "thumbv7-linux-androideabi"
 
 define i1 @f() {
-  %a = alloca i8*
+  %a = alloca ptr
   ; CHECK: adds.w r0, sp, #0
   ; CHECK: it ne
-  %cmp = icmp ne i8** %a, null
+  %cmp = icmp ne ptr %a, null
   ret i1 %cmp
 }

diff --git a/llvm/test/CodeGen/Thumb2/constant-hoisting.ll b/llvm/test/CodeGen/Thumb2/constant-hoisting.ll
index 5a2c01031d3c4..98fe30039259f 100644
--- a/llvm/test/CodeGen/Thumb2/constant-hoisting.ll
+++ b/llvm/test/CodeGen/Thumb2/constant-hoisting.ll
@@ -90,7 +90,7 @@ return:                                           ; preds = %entry, %sw.bb5, %sw
   ret i32 %retval.0
 }
 
-define i32 @test_addr(i32 %a, i8* nocapture readonly %b) {
+define i32 @test_addr(i32 %a, ptr nocapture readonly %b) {
 ; CHECK-V6M-LABEL: test_addr:
 ; CHECK-V6M:         mov r2, r0
 ; CHECK-V6M-NEXT:    movs r0, #19
@@ -165,8 +165,8 @@ sw.bb7:                                           ; preds = %entry
 
 return.sink.split:                                ; preds = %entry, %sw.bb1, %sw.bb4, %sw.bb7
   %.sink = phi i32 [ 307, %sw.bb7 ], [ 306, %sw.bb4 ], [ 305, %sw.bb1 ], [ 304, %entry ]
-  %arrayidx8 = getelementptr inbounds i8, i8* %b, i32 %.sink
-  %0 = load i8, i8* %arrayidx8, align 1
+  %arrayidx8 = getelementptr inbounds i8, ptr %b, i32 %.sink
+  %0 = load i8, ptr %arrayidx8, align 1
   %phitmp = zext i8 %0 to i32
   br label %return
 

diff --git a/llvm/test/CodeGen/Thumb2/constant-islands-cbz.ll b/llvm/test/CodeGen/Thumb2/constant-islands-cbz.ll
index 3f1a8dd75632f..fc42d4e0e4d8c 100644
--- a/llvm/test/CodeGen/Thumb2/constant-islands-cbz.ll
+++ b/llvm/test/CodeGen/Thumb2/constant-islands-cbz.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=thumbv8m.base-none-eabi %s -o - | FileCheck %s --check-prefix=CHECK-T1
 ; RUN: llc < %s -mtriple=thumbv8m.main-none-eabi %s -o - | FileCheck %s --check-prefix=CHECK-T2
 
-define i32* @test(i32* returned %this, i32 %event_size, i8* %event_pointer) {
+define ptr @test(ptr returned %this, i32 %event_size, ptr %event_pointer) {
 ; CHECK-T1-LABEL: test:
 ; CHECK-T1:       @ %bb.0: @ %entry
 ; CHECK-T1-NEXT:    .save {r4, lr}
@@ -43,25 +43,23 @@ define i32* @test(i32* returned %this, i32 %event_size, i8* %event_pointer) {
 ; CHECK-T2-NEXT:    mov r0, r4
 ; CHECK-T2-NEXT:    pop {r4, pc}
 entry:
-  %_update = getelementptr inbounds i32, i32* %this, i32 1
-  %0 = bitcast i32* %_update to i8*
-  tail call void @llvm.memset.p0i8.i32(i8* nonnull align 4 %0, i8 0, i32 16, i1 false) #4
-  %tobool = icmp eq i8* %event_pointer, null
-  %_equeue5 = getelementptr inbounds i32, i32* %this, i32 0
+  %_update = getelementptr inbounds i32, ptr %this, i32 1
+  tail call void @llvm.memset.p0.i32(ptr nonnull align 4 %_update, i8 0, i32 16, i1 false) #4
+  %tobool = icmp eq ptr %event_pointer, null
   br i1 %tobool, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  %call4 = tail call i32 @equeue_create(i32* %_equeue5, i32 %event_size) #5
+  %call4 = tail call i32 @equeue_create(ptr %this, i32 %event_size) #5
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  %call6 = tail call i32 @equeue_create_inplace(i32* %_equeue5, i32 %event_size, i8* nonnull %event_pointer) #5
+  %call6 = tail call i32 @equeue_create_inplace(ptr %this, i32 %event_size, ptr nonnull %event_pointer) #5
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
-  ret i32* %this
+  ret ptr %this
 }
 
-declare dso_local i32 @equeue_create(i32*, i32) local_unnamed_addr #1
-declare dso_local i32 @equeue_create_inplace(i32*, i32, i8*) local_unnamed_addr #1
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1 immarg) #2
+declare dso_local i32 @equeue_create(ptr, i32) local_unnamed_addr #1
+declare dso_local i32 @equeue_create_inplace(ptr, i32, ptr) local_unnamed_addr #1
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg) #2

diff --git a/llvm/test/CodeGen/Thumb2/constant-islands-new-island-padding.ll b/llvm/test/CodeGen/Thumb2/constant-islands-new-island-padding.ll
index 428ef73955862..cf04d5acc0ab9 100644
--- a/llvm/test/CodeGen/Thumb2/constant-islands-new-island-padding.ll
+++ b/llvm/test/CodeGen/Thumb2/constant-islands-new-island-padding.ll
@@ -15,22 +15,22 @@ declare i32 @llvm.arm.space(i32, i32)
 
 define i32 @testpadding(i32 %a) {
 entry:
-  %0 = load i32, i32* @g0, align 4
+  %0 = load i32, ptr @g0, align 4
   %add = add nsw i32 %0, 12
-  store i32 %add, i32* @g0, align 4
-  %1 = load double, double* @d0, align 8
+  store i32 %add, ptr @g0, align 4
+  %1 = load double, ptr @d0, align 8
   %add1 = fadd double %1, 0x3FF3C0B8ED46EACB
-  store double %add1, double* @d0, align 8
+  store double %add1, ptr @d0, align 8
   %tmpcall11 = call i32 @llvm.arm.space(i32 28, i32 undef)
   call void @foo20(i32 191)
-  %2 = load float, float* @f0, align 4
+  %2 = load float, ptr @f0, align 4
   %add2 = fadd float %2, 0x3FF3C0BDC0000000
-  store float %add2, float* @f0, align 4
+  store float %add2, ptr @f0, align 4
   br label %do.body
 
 do.body:                                          ; preds = %do.body, %entry
   tail call void @foo20(i32 19)
-  %3 = load i32, i32* @g1, align 4
+  %3 = load i32, ptr @g1, align 4
   %tobool = icmp eq i32 %3, 0
   br i1 %tobool, label %do.end, label %do.body
 

diff --git a/llvm/test/CodeGen/Thumb2/constant-islands.ll b/llvm/test/CodeGen/Thumb2/constant-islands.ll
index 9021b8c4e95da..0fab3d04fa43e 100644
--- a/llvm/test/CodeGen/Thumb2/constant-islands.ll
+++ b/llvm/test/CodeGen/Thumb2/constant-islands.ll
@@ -22,26 +22,26 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-
 %class.btCapsuleShape = type { %class.btConvexInternalShape, i32 }
 %class.btConvexInternalShape = type { %class.btConvexShape, %class.btVector3, %class.btVector3, float, float }
 %class.btConvexShape = type { %class.btCollisionShape }
-%class.btCollisionShape = type { i32 (...)**, i32, i8* }
-%class.RagDoll = type { i32 (...)**, %class.btDynamicsWorld*, [11 x %class.btCollisionShape*], [11 x %class.btRigidBody*], [10 x %class.btTypedConstraint*] }
-%class.btDynamicsWorld = type { %class.btCollisionWorld, void (%class.btDynamicsWorld*, float)*, void (%class.btDynamicsWorld*, float)*, i8*, %struct.btContactSolverInfo }
-%class.btCollisionWorld = type { i32 (...)**, %class.btAlignedObjectArray, %class.btDispatcher*, %struct.btDispatcherInfo, %class.btStackAlloc*, %class.btBroadphaseInterface*, %class.btIDebugDraw*, i8 }
-%class.btAlignedObjectArray = type { %class.btAlignedAllocator, i32, i32, %class.btCollisionObject**, i8 }
+%class.btCollisionShape = type { ptr, i32, ptr }
+%class.RagDoll = type { ptr, ptr, [11 x ptr], [11 x ptr], [10 x ptr] }
+%class.btDynamicsWorld = type { %class.btCollisionWorld, ptr, ptr, ptr, %struct.btContactSolverInfo }
+%class.btCollisionWorld = type { ptr, %class.btAlignedObjectArray, ptr, %struct.btDispatcherInfo, ptr, ptr, ptr, i8 }
+%class.btAlignedObjectArray = type { %class.btAlignedAllocator, i32, i32, ptr, i8 }
 %class.btAlignedAllocator = type { i8 }
-%class.btCollisionObject = type { i32 (...)**, %class.btTransform, %class.btTransform, %class.btVector3, %class.btVector3, %class.btVector3, i8, float, %struct.btBroadphaseProxy*, %class.btCollisionShape*, %class.btCollisionShape*, i32, i32, i32, i32, float, float, float, i8*, i32, float, float, float, i8, [7 x i8] }
-%struct.btBroadphaseProxy = type { i8*, i16, i16, i8*, i32, %class.btVector3, %class.btVector3 }
-%class.btDispatcher = type { i32 (...)** }
-%struct.btDispatcherInfo = type { float, i32, i32, float, i8, %class.btIDebugDraw*, i8, i8, i8, float, i8, float, %class.btStackAlloc* }
-%class.btIDebugDraw = type { i32 (...)** }
+%class.btCollisionObject = type { ptr, %class.btTransform, %class.btTransform, %class.btVector3, %class.btVector3, %class.btVector3, i8, float, ptr, ptr, ptr, i32, i32, i32, i32, float, float, float, ptr, i32, float, float, float, i8, [7 x i8] }
+%struct.btBroadphaseProxy = type { ptr, i16, i16, ptr, i32, %class.btVector3, %class.btVector3 }
+%class.btDispatcher = type { ptr }
+%struct.btDispatcherInfo = type { float, i32, i32, float, i8, ptr, i8, i8, i8, float, i8, float, ptr }
+%class.btIDebugDraw = type { ptr }
 %class.btStackAlloc = type opaque
-%class.btBroadphaseInterface = type { i32 (...)** }
+%class.btBroadphaseInterface = type { ptr }
 %struct.btContactSolverInfo = type { %struct.btContactSolverInfoData }
 %struct.btContactSolverInfoData = type { float, float, float, float, float, i32, float, float, float, float, float, i32, float, float, float, i32, i32 }
-%class.btRigidBody = type { %class.btCollisionObject, %class.btMatrix3x3, %class.btVector3, %class.btVector3, float, %class.btVector3, %class.btVector3, %class.btVector3, %class.btVector3, %class.btVector3, %class.btVector3, %class.btVector3, float, float, i8, float, float, float, float, float, float, %class.btMotionState*, %class.btAlignedObjectArray.22, i32, i32, i32 }
-%class.btMotionState = type { i32 (...)** }
-%class.btAlignedObjectArray.22 = type { %class.btAlignedAllocator.23, i32, i32, %class.btTypedConstraint**, i8 }
+%class.btRigidBody = type { %class.btCollisionObject, %class.btMatrix3x3, %class.btVector3, %class.btVector3, float, %class.btVector3, %class.btVector3, %class.btVector3, %class.btVector3, %class.btVector3, %class.btVector3, %class.btVector3, float, float, i8, float, float, float, float, float, float, ptr, %class.btAlignedObjectArray.22, i32, i32, i32 }
+%class.btMotionState = type { ptr }
+%class.btAlignedObjectArray.22 = type { %class.btAlignedAllocator.23, i32, i32, ptr, i8 }
 %class.btAlignedAllocator.23 = type { i8 }
-%class.btTypedConstraint = type { i32 (...)**, %struct.btTypedObject, i32, i32, i8, %class.btRigidBody*, %class.btRigidBody*, float, float, %class.btVector3, %class.btVector3, %class.btVector3 }
+%class.btTypedConstraint = type { ptr, %struct.btTypedObject, i32, i32, i8, ptr, ptr, float, float, %class.btVector3, %class.btVector3, %class.btVector3 }
 %struct.btTypedObject = type { i32 }
 %class.btHingeConstraint = type { %class.btTypedConstraint, [3 x %class.btJacobianEntry], [3 x %class.btJacobianEntry], %class.btTransform, %class.btTransform, float, float, float, float, float, float, float, float, float, float, float, float, float, i8, i8, i8, i8, i8, float }
 %class.btJacobianEntry = type { %class.btVector3, %class.btVector3, %class.btVector3, %class.btVector3, %class.btVector3, float }
@@ -49,40 +49,40 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-
 %class.btQuaternion = type { %class.btQuadWord }
 %class.btQuadWord = type { [4 x float] }
 
-@_ZTV7RagDoll = external unnamed_addr constant [4 x i8*]
+@_ZTV7RagDoll = external unnamed_addr constant [4 x ptr]
 
-declare noalias i8* @_Znwm(i32)
+declare noalias ptr @_Znwm(i32)
 
 declare i32 @__gxx_personality_sj0(...)
 
-declare void @_ZdlPv(i8*) nounwind
+declare void @_ZdlPv(ptr) nounwind
 
-declare %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3*, float*, float*, float*) unnamed_addr inlinehint ssp align 2
+declare ptr @_ZN9btVector3C1ERKfS1_S1_(ptr, ptr, ptr, ptr) unnamed_addr inlinehint ssp align 2
 
 declare void @_ZSt9terminatev()
 
-declare %class.btTransform* @_ZN11btTransformC1Ev(%class.btTransform*) unnamed_addr ssp align 2
+declare ptr @_ZN11btTransformC1Ev(ptr) unnamed_addr ssp align 2
 
-declare void @_ZN11btTransform11setIdentityEv(%class.btTransform*) ssp align 2
+declare void @_ZN11btTransform11setIdentityEv(ptr) ssp align 2
 
-declare void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform*, %class.btVector3*) nounwind inlinehint ssp align 2
+declare void @_ZN11btTransform9setOriginERK9btVector3(ptr, ptr) nounwind inlinehint ssp align 2
 
-declare i8* @_ZN13btConvexShapenwEm(i32) inlinehint ssp align 2
+declare ptr @_ZN13btConvexShapenwEm(i32) inlinehint ssp align 2
 
-declare void @_ZN13btConvexShapedlEPv(i8*) inlinehint ssp align 2
+declare void @_ZN13btConvexShapedlEPv(ptr) inlinehint ssp align 2
 
-declare %class.btCapsuleShape* @_ZN14btCapsuleShapeC1Eff(%class.btCapsuleShape*, float, float)
+declare ptr @_ZN14btCapsuleShapeC1Eff(ptr, float, float)
 
-declare %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform*) nounwind inlinehint ssp align 2
+declare ptr @_ZN11btTransform8getBasisEv(ptr) nounwind inlinehint ssp align 2
 
-define %class.RagDoll* @_ZN7RagDollC2EP15btDynamicsWorldRK9btVector3f(%class.RagDoll* %this, %class.btDynamicsWorld* %ownerWorld, %class.btVector3* %positionOffset, float %scale) unnamed_addr ssp align 2 personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*) {
+define ptr @_ZN7RagDollC2EP15btDynamicsWorldRK9btVector3f(ptr %this, ptr %ownerWorld, ptr %positionOffset, float %scale) unnamed_addr ssp align 2 personality ptr @__gxx_personality_sj0 {
 entry:
-  %retval = alloca %class.RagDoll*, align 4
-  %this.addr = alloca %class.RagDoll*, align 4
-  %ownerWorld.addr = alloca %class.btDynamicsWorld*, align 4
-  %positionOffset.addr = alloca %class.btVector3*, align 4
+  %retval = alloca ptr, align 4
+  %this.addr = alloca ptr, align 4
+  %ownerWorld.addr = alloca ptr, align 4
+  %positionOffset.addr = alloca ptr, align 4
   %scale.addr = alloca float, align 4
-  %exn.slot = alloca i8*
+  %exn.slot = alloca ptr
   %ehselector.slot = alloca i32
   %offset = alloca %class.btTransform, align 4
   %transform = alloca %class.btTransform, align 4
@@ -153,8 +153,8 @@ entry:
   %ref.tmp222 = alloca float, align 4
   %ref.tmp225 = alloca %class.btTransform, align 4
   %i = alloca i32, align 4
-  %hingeC = alloca %class.btHingeConstraint*, align 4
-  %coneC = alloca %class.btConeTwistConstraint*, align 4
+  %hingeC = alloca ptr, align 4
+  %coneC = alloca ptr, align 4
   %localA = alloca %class.btTransform, align 4
   %localB = alloca %class.btTransform, align 4
   %ref.tmp240 = alloca %class.btVector3, align 4
@@ -257,1143 +257,1081 @@ entry:
   %ref.tmp507 = alloca float, align 4
   %ref.tmp508 = alloca float, align 4
   %ref.tmp509 = alloca float, align 4
-  store %class.RagDoll* %this, %class.RagDoll** %this.addr, align 4
-  store %class.btDynamicsWorld* %ownerWorld, %class.btDynamicsWorld** %ownerWorld.addr, align 4
-  store %class.btVector3* %positionOffset, %class.btVector3** %positionOffset.addr, align 4
-  store float %scale, float* %scale.addr, align 4
-  %this1 = load %class.RagDoll*, %class.RagDoll** %this.addr
-  store %class.RagDoll* %this1, %class.RagDoll** %retval
-  %0 = bitcast %class.RagDoll* %this1 to i8***
-  store i8** getelementptr inbounds ([4 x i8*], [4 x i8*]* @_ZTV7RagDoll, i64 0, i64 2), i8*** %0
-  %m_ownerWorld = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
-  %1 = load %class.btDynamicsWorld*, %class.btDynamicsWorld** %ownerWorld.addr, align 4
-  store %class.btDynamicsWorld* %1, %class.btDynamicsWorld** %m_ownerWorld, align 4
-  %call = call i8* @_ZN13btConvexShapenwEm(i32 56)
-  %2 = bitcast i8* %call to %class.btCapsuleShape*
-  %3 = load float, float* %scale.addr, align 4
-  %mul = fmul float 0x3FC3333340000000, %3
-  %4 = load float, float* %scale.addr, align 4
-  %mul2 = fmul float 0x3FC99999A0000000, %4
-  %call3 = invoke %class.btCapsuleShape* @_ZN14btCapsuleShapeC1Eff(%class.btCapsuleShape* %2, float %mul, float %mul2)
+  store ptr %this, ptr %this.addr, align 4
+  store ptr %ownerWorld, ptr %ownerWorld.addr, align 4
+  store ptr %positionOffset, ptr %positionOffset.addr, align 4
+  store float %scale, ptr %scale.addr, align 4
+  %this1 = load ptr, ptr %this.addr
+  store ptr %this1, ptr %retval
+  store ptr getelementptr inbounds ([4 x ptr], ptr @_ZTV7RagDoll, i64 0, i64 2), ptr %this1
+  %m_ownerWorld = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 1
+  %0 = load ptr, ptr %ownerWorld.addr, align 4
+  store ptr %0, ptr %m_ownerWorld, align 4
+  %call = call ptr @_ZN13btConvexShapenwEm(i32 56)
+  %1 = load float, ptr %scale.addr, align 4
+  %mul = fmul float 0x3FC3333340000000, %1
+  %2 = load float, ptr %scale.addr, align 4
+  %mul2 = fmul float 0x3FC99999A0000000, %2
+  %call3 = invoke ptr @_ZN14btCapsuleShapeC1Eff(ptr %call, float %mul, float %mul2)
           to label %invoke.cont unwind label %lpad
 
 invoke.cont:                                      ; preds = %entry
-  %5 = bitcast %class.btCapsuleShape* %2 to %class.btCollisionShape*
-  %m_shapes = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes, i32 0, i32 0
-  store %class.btCollisionShape* %5, %class.btCollisionShape** %arrayidx, align 4
-  %call5 = call i8* @_ZN13btConvexShapenwEm(i32 56)
-  %6 = bitcast i8* %call5 to %class.btCapsuleShape*
-  %7 = load float, float* %scale.addr, align 4
-  %mul6 = fmul float 0x3FC3333340000000, %7
-  %8 = load float, float* %scale.addr, align 4
-  %mul7 = fmul float 0x3FD1EB8520000000, %8
-  %call10 = invoke %class.btCapsuleShape* @_ZN14btCapsuleShapeC1Eff(%class.btCapsuleShape* %6, float %mul6, float %mul7)
+  %m_shapes = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  store ptr %call, ptr %m_shapes, align 4
+  %call5 = call ptr @_ZN13btConvexShapenwEm(i32 56)
+  %3 = load float, ptr %scale.addr, align 4
+  %mul6 = fmul float 0x3FC3333340000000, %3
+  %4 = load float, ptr %scale.addr, align 4
+  %mul7 = fmul float 0x3FD1EB8520000000, %4
+  %call10 = invoke ptr @_ZN14btCapsuleShapeC1Eff(ptr %call5, float %mul6, float %mul7)
           to label %invoke.cont9 unwind label %lpad8
 
 invoke.cont9:                                     ; preds = %invoke.cont
-  %9 = bitcast %class.btCapsuleShape* %6 to %class.btCollisionShape*
-  %m_shapes12 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx13 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes12, i32 0, i32 1
-  store %class.btCollisionShape* %9, %class.btCollisionShape** %arrayidx13, align 4
-  %call14 = call i8* @_ZN13btConvexShapenwEm(i32 56)
-  %10 = bitcast i8* %call14 to %class.btCapsuleShape*
-  %11 = load float, float* %scale.addr, align 4
-  %mul15 = fmul float 0x3FB99999A0000000, %11
-  %12 = load float, float* %scale.addr, align 4
-  %mul16 = fmul float 0x3FA99999A0000000, %12
-  %call19 = invoke %class.btCapsuleShape* @_ZN14btCapsuleShapeC1Eff(%class.btCapsuleShape* %10, float %mul15, float %mul16)
+  %m_shapes12 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx13 = getelementptr inbounds [11 x ptr], ptr %m_shapes12, i32 0, i32 1
+  store ptr %call5, ptr %arrayidx13, align 4
+  %call14 = call ptr @_ZN13btConvexShapenwEm(i32 56)
+  %5 = load float, ptr %scale.addr, align 4
+  %mul15 = fmul float 0x3FB99999A0000000, %5
+  %6 = load float, ptr %scale.addr, align 4
+  %mul16 = fmul float 0x3FA99999A0000000, %6
+  %call19 = invoke ptr @_ZN14btCapsuleShapeC1Eff(ptr %call14, float %mul15, float %mul16)
           to label %invoke.cont18 unwind label %lpad17
 
 invoke.cont18:                                    ; preds = %invoke.cont9
-  %13 = bitcast %class.btCapsuleShape* %10 to %class.btCollisionShape*
-  %m_shapes21 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx22 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes21, i32 0, i32 2
-  store %class.btCollisionShape* %13, %class.btCollisionShape** %arrayidx22, align 4
-  %call23 = call i8* @_ZN13btConvexShapenwEm(i32 56)
-  %14 = bitcast i8* %call23 to %class.btCapsuleShape*
-  %15 = load float, float* %scale.addr, align 4
-  %mul24 = fmul float 0x3FB1EB8520000000, %15
-  %16 = load float, float* %scale.addr, align 4
-  %mul25 = fmul float 0x3FDCCCCCC0000000, %16
-  %call28 = invoke %class.btCapsuleShape* @_ZN14btCapsuleShapeC1Eff(%class.btCapsuleShape* %14, float %mul24, float %mul25)
+  %m_shapes21 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx22 = getelementptr inbounds [11 x ptr], ptr %m_shapes21, i32 0, i32 2
+  store ptr %call14, ptr %arrayidx22, align 4
+  %call23 = call ptr @_ZN13btConvexShapenwEm(i32 56)
+  %7 = load float, ptr %scale.addr, align 4
+  %mul24 = fmul float 0x3FB1EB8520000000, %7
+  %8 = load float, ptr %scale.addr, align 4
+  %mul25 = fmul float 0x3FDCCCCCC0000000, %8
+  %call28 = invoke ptr @_ZN14btCapsuleShapeC1Eff(ptr %call23, float %mul24, float %mul25)
           to label %invoke.cont27 unwind label %lpad26
 
 invoke.cont27:                                    ; preds = %invoke.cont18
-  %17 = bitcast %class.btCapsuleShape* %14 to %class.btCollisionShape*
-  %m_shapes30 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx31 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes30, i32 0, i32 3
-  store %class.btCollisionShape* %17, %class.btCollisionShape** %arrayidx31, align 4
-  %call32 = call i8* @_ZN13btConvexShapenwEm(i32 56)
-  %18 = bitcast i8* %call32 to %class.btCapsuleShape*
-  %19 = load float, float* %scale.addr, align 4
-  %mul33 = fmul float 0x3FA99999A0000000, %19
-  %20 = load float, float* %scale.addr, align 4
-  %mul34 = fmul float 0x3FD7AE1480000000, %20
-  %call37 = invoke %class.btCapsuleShape* @_ZN14btCapsuleShapeC1Eff(%class.btCapsuleShape* %18, float %mul33, float %mul34)
+  %m_shapes30 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx31 = getelementptr inbounds [11 x ptr], ptr %m_shapes30, i32 0, i32 3
+  store ptr %call23, ptr %arrayidx31, align 4
+  %call32 = call ptr @_ZN13btConvexShapenwEm(i32 56)
+  %9 = load float, ptr %scale.addr, align 4
+  %mul33 = fmul float 0x3FA99999A0000000, %9
+  %10 = load float, ptr %scale.addr, align 4
+  %mul34 = fmul float 0x3FD7AE1480000000, %10
+  %call37 = invoke ptr @_ZN14btCapsuleShapeC1Eff(ptr %call32, float %mul33, float %mul34)
           to label %invoke.cont36 unwind label %lpad35
 
 invoke.cont36:                                    ; preds = %invoke.cont27
-  %21 = bitcast %class.btCapsuleShape* %18 to %class.btCollisionShape*
-  %m_shapes39 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx40 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes39, i32 0, i32 4
-  store %class.btCollisionShape* %21, %class.btCollisionShape** %arrayidx40, align 4
-  %call41 = call i8* @_ZN13btConvexShapenwEm(i32 56)
-  %22 = bitcast i8* %call41 to %class.btCapsuleShape*
-  %23 = load float, float* %scale.addr, align 4
-  %mul42 = fmul float 0x3FB1EB8520000000, %23
-  %24 = load float, float* %scale.addr, align 4
-  %mul43 = fmul float 0x3FDCCCCCC0000000, %24
-  %call46 = invoke %class.btCapsuleShape* @_ZN14btCapsuleShapeC1Eff(%class.btCapsuleShape* %22, float %mul42, float %mul43)
+  %m_shapes39 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx40 = getelementptr inbounds [11 x ptr], ptr %m_shapes39, i32 0, i32 4
+  store ptr %call32, ptr %arrayidx40, align 4
+  %call41 = call ptr @_ZN13btConvexShapenwEm(i32 56)
+  %11 = load float, ptr %scale.addr, align 4
+  %mul42 = fmul float 0x3FB1EB8520000000, %11
+  %12 = load float, ptr %scale.addr, align 4
+  %mul43 = fmul float 0x3FDCCCCCC0000000, %12
+  %call46 = invoke ptr @_ZN14btCapsuleShapeC1Eff(ptr %call41, float %mul42, float %mul43)
           to label %invoke.cont45 unwind label %lpad44
 
 invoke.cont45:                                    ; preds = %invoke.cont36
-  %25 = bitcast %class.btCapsuleShape* %22 to %class.btCollisionShape*
-  %m_shapes48 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx49 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes48, i32 0, i32 5
-  store %class.btCollisionShape* %25, %class.btCollisionShape** %arrayidx49, align 4
-  %call50 = call i8* @_ZN13btConvexShapenwEm(i32 56)
-  %26 = bitcast i8* %call50 to %class.btCapsuleShape*
-  %27 = load float, float* %scale.addr, align 4
-  %mul51 = fmul float 0x3FA99999A0000000, %27
-  %28 = load float, float* %scale.addr, align 4
-  %mul52 = fmul float 0x3FD7AE1480000000, %28
-  %call55 = invoke %class.btCapsuleShape* @_ZN14btCapsuleShapeC1Eff(%class.btCapsuleShape* %26, float %mul51, float %mul52)
+  %m_shapes48 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx49 = getelementptr inbounds [11 x ptr], ptr %m_shapes48, i32 0, i32 5
+  store ptr %call41, ptr %arrayidx49, align 4
+  %call50 = call ptr @_ZN13btConvexShapenwEm(i32 56)
+  %13 = load float, ptr %scale.addr, align 4
+  %mul51 = fmul float 0x3FA99999A0000000, %13
+  %14 = load float, ptr %scale.addr, align 4
+  %mul52 = fmul float 0x3FD7AE1480000000, %14
+  %call55 = invoke ptr @_ZN14btCapsuleShapeC1Eff(ptr %call50, float %mul51, float %mul52)
           to label %invoke.cont54 unwind label %lpad53
 
 invoke.cont54:                                    ; preds = %invoke.cont45
-  %29 = bitcast %class.btCapsuleShape* %26 to %class.btCollisionShape*
-  %m_shapes57 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx58 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes57, i32 0, i32 6
-  store %class.btCollisionShape* %29, %class.btCollisionShape** %arrayidx58, align 4
-  %call59 = call i8* @_ZN13btConvexShapenwEm(i32 56)
-  %30 = bitcast i8* %call59 to %class.btCapsuleShape*
-  %31 = load float, float* %scale.addr, align 4
-  %mul60 = fmul float 0x3FA99999A0000000, %31
-  %32 = load float, float* %scale.addr, align 4
-  %mul61 = fmul float 0x3FD51EB860000000, %32
-  %call64 = invoke %class.btCapsuleShape* @_ZN14btCapsuleShapeC1Eff(%class.btCapsuleShape* %30, float %mul60, float %mul61)
+  %m_shapes57 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx58 = getelementptr inbounds [11 x ptr], ptr %m_shapes57, i32 0, i32 6
+  store ptr %call50, ptr %arrayidx58, align 4
+  %call59 = call ptr @_ZN13btConvexShapenwEm(i32 56)
+  %15 = load float, ptr %scale.addr, align 4
+  %mul60 = fmul float 0x3FA99999A0000000, %15
+  %16 = load float, ptr %scale.addr, align 4
+  %mul61 = fmul float 0x3FD51EB860000000, %16
+  %call64 = invoke ptr @_ZN14btCapsuleShapeC1Eff(ptr %call59, float %mul60, float %mul61)
           to label %invoke.cont63 unwind label %lpad62
 
 invoke.cont63:                                    ; preds = %invoke.cont54
-  %33 = bitcast %class.btCapsuleShape* %30 to %class.btCollisionShape*
-  %m_shapes66 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx67 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes66, i32 0, i32 7
-  store %class.btCollisionShape* %33, %class.btCollisionShape** %arrayidx67, align 4
-  %call68 = call i8* @_ZN13btConvexShapenwEm(i32 56)
-  %34 = bitcast i8* %call68 to %class.btCapsuleShape*
-  %35 = load float, float* %scale.addr, align 4
-  %mul69 = fmul float 0x3FA47AE140000000, %35
-  %36 = load float, float* %scale.addr, align 4
-  %mul70 = fmul float 2.500000e-01, %36
-  %call73 = invoke %class.btCapsuleShape* @_ZN14btCapsuleShapeC1Eff(%class.btCapsuleShape* %34, float %mul69, float %mul70)
+  %m_shapes66 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx67 = getelementptr inbounds [11 x ptr], ptr %m_shapes66, i32 0, i32 7
+  store ptr %call59, ptr %arrayidx67, align 4
+  %call68 = call ptr @_ZN13btConvexShapenwEm(i32 56)
+  %17 = load float, ptr %scale.addr, align 4
+  %mul69 = fmul float 0x3FA47AE140000000, %17
+  %18 = load float, ptr %scale.addr, align 4
+  %mul70 = fmul float 2.500000e-01, %18
+  %call73 = invoke ptr @_ZN14btCapsuleShapeC1Eff(ptr %call68, float %mul69, float %mul70)
           to label %invoke.cont72 unwind label %lpad71
 
 invoke.cont72:                                    ; preds = %invoke.cont63
-  %37 = bitcast %class.btCapsuleShape* %34 to %class.btCollisionShape*
-  %m_shapes75 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx76 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes75, i32 0, i32 8
-  store %class.btCollisionShape* %37, %class.btCollisionShape** %arrayidx76, align 4
-  %call77 = call i8* @_ZN13btConvexShapenwEm(i32 56)
-  %38 = bitcast i8* %call77 to %class.btCapsuleShape*
-  %39 = load float, float* %scale.addr, align 4
-  %mul78 = fmul float 0x3FA99999A0000000, %39
-  %40 = load float, float* %scale.addr, align 4
-  %mul79 = fmul float 0x3FD51EB860000000, %40
-  %call82 = invoke %class.btCapsuleShape* @_ZN14btCapsuleShapeC1Eff(%class.btCapsuleShape* %38, float %mul78, float %mul79)
+  %m_shapes75 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx76 = getelementptr inbounds [11 x ptr], ptr %m_shapes75, i32 0, i32 8
+  store ptr %call68, ptr %arrayidx76, align 4
+  %call77 = call ptr @_ZN13btConvexShapenwEm(i32 56)
+  %19 = load float, ptr %scale.addr, align 4
+  %mul78 = fmul float 0x3FA99999A0000000, %19
+  %20 = load float, ptr %scale.addr, align 4
+  %mul79 = fmul float 0x3FD51EB860000000, %20
+  %call82 = invoke ptr @_ZN14btCapsuleShapeC1Eff(ptr %call77, float %mul78, float %mul79)
           to label %invoke.cont81 unwind label %lpad80
 
 invoke.cont81:                                    ; preds = %invoke.cont72
-  %41 = bitcast %class.btCapsuleShape* %38 to %class.btCollisionShape*
-  %m_shapes84 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx85 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes84, i32 0, i32 9
-  store %class.btCollisionShape* %41, %class.btCollisionShape** %arrayidx85, align 4
-  %call86 = call i8* @_ZN13btConvexShapenwEm(i32 56)
-  %42 = bitcast i8* %call86 to %class.btCapsuleShape*
-  %43 = load float, float* %scale.addr, align 4
-  %mul87 = fmul float 0x3FA47AE140000000, %43
-  %44 = load float, float* %scale.addr, align 4
-  %mul88 = fmul float 2.500000e-01, %44
-  %call91 = invoke %class.btCapsuleShape* @_ZN14btCapsuleShapeC1Eff(%class.btCapsuleShape* %42, float %mul87, float %mul88)
+  %m_shapes84 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx85 = getelementptr inbounds [11 x ptr], ptr %m_shapes84, i32 0, i32 9
+  store ptr %call77, ptr %arrayidx85, align 4
+  %call86 = call ptr @_ZN13btConvexShapenwEm(i32 56)
+  %21 = load float, ptr %scale.addr, align 4
+  %mul87 = fmul float 0x3FA47AE140000000, %21
+  %22 = load float, ptr %scale.addr, align 4
+  %mul88 = fmul float 2.500000e-01, %22
+  %call91 = invoke ptr @_ZN14btCapsuleShapeC1Eff(ptr %call86, float %mul87, float %mul88)
           to label %invoke.cont90 unwind label %lpad89
 
 invoke.cont90:                                    ; preds = %invoke.cont81
-  %45 = bitcast %class.btCapsuleShape* %42 to %class.btCollisionShape*
-  %m_shapes93 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx94 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes93, i32 0, i32 10
-  store %class.btCollisionShape* %45, %class.btCollisionShape** %arrayidx94, align 4
-  %call95 = call %class.btTransform* @_ZN11btTransformC1Ev(%class.btTransform* %offset)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %offset)
-  %46 = load %class.btVector3*, %class.btVector3** %positionOffset.addr, align 4
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %offset, %class.btVector3* %46)
-  %call96 = call %class.btTransform* @_ZN11btTransformC1Ev(%class.btTransform* %transform)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
-  store float 0.000000e+00, float* %ref.tmp98, align 4
-  store float 1.000000e+00, float* %ref.tmp99, align 4
-  store float 0.000000e+00, float* %ref.tmp100, align 4
-  %call101 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp97, float* %ref.tmp98, float* %ref.tmp99, float* %ref.tmp100)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp, float* %scale.addr, %class.btVector3* %ref.tmp97)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp)
-  call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret(%class.btTransform) %ref.tmp102, %class.btTransform* %offset, %class.btTransform* %transform)
-  %m_shapes103 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx104 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes103, i32 0, i32 0
-  %47 = load %class.btCollisionShape*, %class.btCollisionShape** %arrayidx104, align 4
-  %call105 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp102, %class.btCollisionShape* %47)
-  %m_bodies = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx106 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies, i32 0, i32 0
-  store %class.btRigidBody* %call105, %class.btRigidBody** %arrayidx106, align 4
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
-  store float 0.000000e+00, float* %ref.tmp109, align 4
-  store float 0x3FF3333340000000, float* %ref.tmp110, align 4
-  store float 0.000000e+00, float* %ref.tmp111, align 4
-  %call112 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp108, float* %ref.tmp109, float* %ref.tmp110, float* %ref.tmp111)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp107, float* %scale.addr, %class.btVector3* %ref.tmp108)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp107)
-  call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret(%class.btTransform) %ref.tmp113, %class.btTransform* %offset, %class.btTransform* %transform)
-  %m_shapes114 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx115 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes114, i32 0, i32 1
-  %48 = load %class.btCollisionShape*, %class.btCollisionShape** %arrayidx115, align 4
-  %call116 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp113, %class.btCollisionShape* %48)
-  %m_bodies117 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx118 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies117, i32 0, i32 1
-  store %class.btRigidBody* %call116, %class.btRigidBody** %arrayidx118, align 4
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
-  store float 0.000000e+00, float* %ref.tmp121, align 4
-  store float 0x3FF99999A0000000, float* %ref.tmp122, align 4
-  store float 0.000000e+00, float* %ref.tmp123, align 4
-  %call124 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp120, float* %ref.tmp121, float* %ref.tmp122, float* %ref.tmp123)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp119, float* %scale.addr, %class.btVector3* %ref.tmp120)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp119)
-  call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret(%class.btTransform) %ref.tmp125, %class.btTransform* %offset, %class.btTransform* %transform)
-  %m_shapes126 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx127 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes126, i32 0, i32 2
-  %49 = load %class.btCollisionShape*, %class.btCollisionShape** %arrayidx127, align 4
-  %call128 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp125, %class.btCollisionShape* %49)
-  %m_bodies129 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx130 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies129, i32 0, i32 2
-  store %class.btRigidBody* %call128, %class.btRigidBody** %arrayidx130, align 4
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
-  store float 0xBFC70A3D80000000, float* %ref.tmp133, align 4
-  store float 0x3FE4CCCCC0000000, float* %ref.tmp134, align 4
-  store float 0.000000e+00, float* %ref.tmp135, align 4
-  %call136 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp132, float* %ref.tmp133, float* %ref.tmp134, float* %ref.tmp135)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp131, float* %scale.addr, %class.btVector3* %ref.tmp132)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp131)
-  call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret(%class.btTransform) %ref.tmp137, %class.btTransform* %offset, %class.btTransform* %transform)
-  %m_shapes138 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx139 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes138, i32 0, i32 3
-  %50 = load %class.btCollisionShape*, %class.btCollisionShape** %arrayidx139, align 4
-  %call140 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp137, %class.btCollisionShape* %50)
-  %m_bodies141 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx142 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies141, i32 0, i32 3
-  store %class.btRigidBody* %call140, %class.btRigidBody** %arrayidx142, align 4
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
-  store float 0xBFC70A3D80000000, float* %ref.tmp145, align 4
-  store float 0x3FC99999A0000000, float* %ref.tmp146, align 4
-  store float 0.000000e+00, float* %ref.tmp147, align 4
-  %call148 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp144, float* %ref.tmp145, float* %ref.tmp146, float* %ref.tmp147)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp143, float* %scale.addr, %class.btVector3* %ref.tmp144)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp143)
-  call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret(%class.btTransform) %ref.tmp149, %class.btTransform* %offset, %class.btTransform* %transform)
-  %m_shapes150 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx151 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes150, i32 0, i32 4
-  %51 = load %class.btCollisionShape*, %class.btCollisionShape** %arrayidx151, align 4
-  %call152 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp149, %class.btCollisionShape* %51)
-  %m_bodies153 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx154 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies153, i32 0, i32 4
-  store %class.btRigidBody* %call152, %class.btRigidBody** %arrayidx154, align 4
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
-  store float 0x3FC70A3D80000000, float* %ref.tmp157, align 4
-  store float 0x3FE4CCCCC0000000, float* %ref.tmp158, align 4
-  store float 0.000000e+00, float* %ref.tmp159, align 4
-  %call160 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp156, float* %ref.tmp157, float* %ref.tmp158, float* %ref.tmp159)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp155, float* %scale.addr, %class.btVector3* %ref.tmp156)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp155)
-  call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret(%class.btTransform) %ref.tmp161, %class.btTransform* %offset, %class.btTransform* %transform)
-  %m_shapes162 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx163 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes162, i32 0, i32 5
-  %52 = load %class.btCollisionShape*, %class.btCollisionShape** %arrayidx163, align 4
-  %call164 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp161, %class.btCollisionShape* %52)
-  %m_bodies165 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx166 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies165, i32 0, i32 5
-  store %class.btRigidBody* %call164, %class.btRigidBody** %arrayidx166, align 4
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
-  store float 0x3FC70A3D80000000, float* %ref.tmp169, align 4
-  store float 0x3FC99999A0000000, float* %ref.tmp170, align 4
-  store float 0.000000e+00, float* %ref.tmp171, align 4
-  %call172 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp168, float* %ref.tmp169, float* %ref.tmp170, float* %ref.tmp171)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp167, float* %scale.addr, %class.btVector3* %ref.tmp168)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp167)
-  call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret(%class.btTransform) %ref.tmp173, %class.btTransform* %offset, %class.btTransform* %transform)
-  %m_shapes174 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx175 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes174, i32 0, i32 6
-  %53 = load %class.btCollisionShape*, %class.btCollisionShape** %arrayidx175, align 4
-  %call176 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp173, %class.btCollisionShape* %53)
-  %m_bodies177 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx178 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies177, i32 0, i32 6
-  store %class.btRigidBody* %call176, %class.btRigidBody** %arrayidx178, align 4
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
-  store float 0xBFD6666660000000, float* %ref.tmp181, align 4
-  store float 0x3FF7333340000000, float* %ref.tmp182, align 4
-  store float 0.000000e+00, float* %ref.tmp183, align 4
-  %call184 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp180, float* %ref.tmp181, float* %ref.tmp182, float* %ref.tmp183)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp179, float* %scale.addr, %class.btVector3* %ref.tmp180)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp179)
-  %call185 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %transform)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call185, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
-  call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret(%class.btTransform) %ref.tmp186, %class.btTransform* %offset, %class.btTransform* %transform)
-  %m_shapes187 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx188 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes187, i32 0, i32 7
-  %54 = load %class.btCollisionShape*, %class.btCollisionShape** %arrayidx188, align 4
-  %call189 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp186, %class.btCollisionShape* %54)
-  %m_bodies190 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx191 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies190, i32 0, i32 7
-  store %class.btRigidBody* %call189, %class.btRigidBody** %arrayidx191, align 4
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
-  store float 0xBFE6666660000000, float* %ref.tmp194, align 4
-  store float 0x3FF7333340000000, float* %ref.tmp195, align 4
-  store float 0.000000e+00, float* %ref.tmp196, align 4
-  %call197 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp193, float* %ref.tmp194, float* %ref.tmp195, float* %ref.tmp196)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp192, float* %scale.addr, %class.btVector3* %ref.tmp193)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp192)
-  %call198 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %transform)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call198, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
-  call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret(%class.btTransform) %ref.tmp199, %class.btTransform* %offset, %class.btTransform* %transform)
-  %m_shapes200 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx201 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes200, i32 0, i32 8
-  %55 = load %class.btCollisionShape*, %class.btCollisionShape** %arrayidx201, align 4
-  %call202 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp199, %class.btCollisionShape* %55)
-  %m_bodies203 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx204 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies203, i32 0, i32 8
-  store %class.btRigidBody* %call202, %class.btRigidBody** %arrayidx204, align 4
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
-  store float 0x3FD6666660000000, float* %ref.tmp207, align 4
-  store float 0x3FF7333340000000, float* %ref.tmp208, align 4
-  store float 0.000000e+00, float* %ref.tmp209, align 4
-  %call210 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp206, float* %ref.tmp207, float* %ref.tmp208, float* %ref.tmp209)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp205, float* %scale.addr, %class.btVector3* %ref.tmp206)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp205)
-  %call211 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %transform)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call211, float 0.000000e+00, float 0.000000e+00, float 0xBFF921FB60000000)
-  call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret(%class.btTransform) %ref.tmp212, %class.btTransform* %offset, %class.btTransform* %transform)
-  %m_shapes213 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx214 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes213, i32 0, i32 9
-  %56 = load %class.btCollisionShape*, %class.btCollisionShape** %arrayidx214, align 4
-  %call215 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp212, %class.btCollisionShape* %56)
-  %m_bodies216 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx217 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies216, i32 0, i32 9
-  store %class.btRigidBody* %call215, %class.btRigidBody** %arrayidx217, align 4
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
-  store float 0x3FE6666660000000, float* %ref.tmp220, align 4
-  store float 0x3FF7333340000000, float* %ref.tmp221, align 4
-  store float 0.000000e+00, float* %ref.tmp222, align 4
-  %call223 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp219, float* %ref.tmp220, float* %ref.tmp221, float* %ref.tmp222)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp218, float* %scale.addr, %class.btVector3* %ref.tmp219)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp218)
-  %call224 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %transform)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call224, float 0.000000e+00, float 0.000000e+00, float 0xBFF921FB60000000)
-  call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret(%class.btTransform) %ref.tmp225, %class.btTransform* %offset, %class.btTransform* %transform)
-  %m_shapes226 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
-  %arrayidx227 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes226, i32 0, i32 10
-  %57 = load %class.btCollisionShape*, %class.btCollisionShape** %arrayidx227, align 4
-  %call228 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp225, %class.btCollisionShape* %57)
-  %m_bodies229 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx230 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies229, i32 0, i32 10
-  store %class.btRigidBody* %call228, %class.btRigidBody** %arrayidx230, align 4
-  store i32 0, i32* %i, align 4
+  %m_shapes93 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx94 = getelementptr inbounds [11 x ptr], ptr %m_shapes93, i32 0, i32 10
+  store ptr %call86, ptr %arrayidx94, align 4
+  %call95 = call ptr @_ZN11btTransformC1Ev(ptr %offset)
+  call void @_ZN11btTransform11setIdentityEv(ptr %offset)
+  %23 = load ptr, ptr %positionOffset.addr, align 4
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %offset, ptr %23)
+  %call96 = call ptr @_ZN11btTransformC1Ev(ptr %transform)
+  call void @_ZN11btTransform11setIdentityEv(ptr %transform)
+  store float 0.000000e+00, ptr %ref.tmp98, align 4
+  store float 1.000000e+00, ptr %ref.tmp99, align 4
+  store float 0.000000e+00, ptr %ref.tmp100, align 4
+  %call101 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp97, ptr %ref.tmp98, ptr %ref.tmp99, ptr %ref.tmp100)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp, ptr %scale.addr, ptr %ref.tmp97)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %transform, ptr %ref.tmp)
+  call void @_ZNK11btTransformmlERKS_(ptr sret(%class.btTransform) %ref.tmp102, ptr %offset, ptr %transform)
+  %m_shapes103 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %24 = load ptr, ptr %m_shapes103, align 4
+  %call105 = call ptr @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(ptr %this1, float 1.000000e+00, ptr %ref.tmp102, ptr %24)
+  %m_bodies = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  store ptr %call105, ptr %m_bodies, align 4
+  call void @_ZN11btTransform11setIdentityEv(ptr %transform)
+  store float 0.000000e+00, ptr %ref.tmp109, align 4
+  store float 0x3FF3333340000000, ptr %ref.tmp110, align 4
+  store float 0.000000e+00, ptr %ref.tmp111, align 4
+  %call112 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp108, ptr %ref.tmp109, ptr %ref.tmp110, ptr %ref.tmp111)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp107, ptr %scale.addr, ptr %ref.tmp108)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %transform, ptr %ref.tmp107)
+  call void @_ZNK11btTransformmlERKS_(ptr sret(%class.btTransform) %ref.tmp113, ptr %offset, ptr %transform)
+  %m_shapes114 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx115 = getelementptr inbounds [11 x ptr], ptr %m_shapes114, i32 0, i32 1
+  %25 = load ptr, ptr %arrayidx115, align 4
+  %call116 = call ptr @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(ptr %this1, float 1.000000e+00, ptr %ref.tmp113, ptr %25)
+  %m_bodies117 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx118 = getelementptr inbounds [11 x ptr], ptr %m_bodies117, i32 0, i32 1
+  store ptr %call116, ptr %arrayidx118, align 4
+  call void @_ZN11btTransform11setIdentityEv(ptr %transform)
+  store float 0.000000e+00, ptr %ref.tmp121, align 4
+  store float 0x3FF99999A0000000, ptr %ref.tmp122, align 4
+  store float 0.000000e+00, ptr %ref.tmp123, align 4
+  %call124 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp120, ptr %ref.tmp121, ptr %ref.tmp122, ptr %ref.tmp123)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp119, ptr %scale.addr, ptr %ref.tmp120)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %transform, ptr %ref.tmp119)
+  call void @_ZNK11btTransformmlERKS_(ptr sret(%class.btTransform) %ref.tmp125, ptr %offset, ptr %transform)
+  %m_shapes126 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx127 = getelementptr inbounds [11 x ptr], ptr %m_shapes126, i32 0, i32 2
+  %26 = load ptr, ptr %arrayidx127, align 4
+  %call128 = call ptr @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(ptr %this1, float 1.000000e+00, ptr %ref.tmp125, ptr %26)
+  %m_bodies129 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx130 = getelementptr inbounds [11 x ptr], ptr %m_bodies129, i32 0, i32 2
+  store ptr %call128, ptr %arrayidx130, align 4
+  call void @_ZN11btTransform11setIdentityEv(ptr %transform)
+  store float 0xBFC70A3D80000000, ptr %ref.tmp133, align 4
+  store float 0x3FE4CCCCC0000000, ptr %ref.tmp134, align 4
+  store float 0.000000e+00, ptr %ref.tmp135, align 4
+  %call136 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp132, ptr %ref.tmp133, ptr %ref.tmp134, ptr %ref.tmp135)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp131, ptr %scale.addr, ptr %ref.tmp132)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %transform, ptr %ref.tmp131)
+  call void @_ZNK11btTransformmlERKS_(ptr sret(%class.btTransform) %ref.tmp137, ptr %offset, ptr %transform)
+  %m_shapes138 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx139 = getelementptr inbounds [11 x ptr], ptr %m_shapes138, i32 0, i32 3
+  %27 = load ptr, ptr %arrayidx139, align 4
+  %call140 = call ptr @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(ptr %this1, float 1.000000e+00, ptr %ref.tmp137, ptr %27)
+  %m_bodies141 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx142 = getelementptr inbounds [11 x ptr], ptr %m_bodies141, i32 0, i32 3
+  store ptr %call140, ptr %arrayidx142, align 4
+  call void @_ZN11btTransform11setIdentityEv(ptr %transform)
+  store float 0xBFC70A3D80000000, ptr %ref.tmp145, align 4
+  store float 0x3FC99999A0000000, ptr %ref.tmp146, align 4
+  store float 0.000000e+00, ptr %ref.tmp147, align 4
+  %call148 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp144, ptr %ref.tmp145, ptr %ref.tmp146, ptr %ref.tmp147)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp143, ptr %scale.addr, ptr %ref.tmp144)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %transform, ptr %ref.tmp143)
+  call void @_ZNK11btTransformmlERKS_(ptr sret(%class.btTransform) %ref.tmp149, ptr %offset, ptr %transform)
+  %m_shapes150 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx151 = getelementptr inbounds [11 x ptr], ptr %m_shapes150, i32 0, i32 4
+  %28 = load ptr, ptr %arrayidx151, align 4
+  %call152 = call ptr @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(ptr %this1, float 1.000000e+00, ptr %ref.tmp149, ptr %28)
+  %m_bodies153 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx154 = getelementptr inbounds [11 x ptr], ptr %m_bodies153, i32 0, i32 4
+  store ptr %call152, ptr %arrayidx154, align 4
+  call void @_ZN11btTransform11setIdentityEv(ptr %transform)
+  store float 0x3FC70A3D80000000, ptr %ref.tmp157, align 4
+  store float 0x3FE4CCCCC0000000, ptr %ref.tmp158, align 4
+  store float 0.000000e+00, ptr %ref.tmp159, align 4
+  %call160 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp156, ptr %ref.tmp157, ptr %ref.tmp158, ptr %ref.tmp159)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp155, ptr %scale.addr, ptr %ref.tmp156)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %transform, ptr %ref.tmp155)
+  call void @_ZNK11btTransformmlERKS_(ptr sret(%class.btTransform) %ref.tmp161, ptr %offset, ptr %transform)
+  %m_shapes162 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx163 = getelementptr inbounds [11 x ptr], ptr %m_shapes162, i32 0, i32 5
+  %29 = load ptr, ptr %arrayidx163, align 4
+  %call164 = call ptr @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(ptr %this1, float 1.000000e+00, ptr %ref.tmp161, ptr %29)
+  %m_bodies165 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx166 = getelementptr inbounds [11 x ptr], ptr %m_bodies165, i32 0, i32 5
+  store ptr %call164, ptr %arrayidx166, align 4
+  call void @_ZN11btTransform11setIdentityEv(ptr %transform)
+  store float 0x3FC70A3D80000000, ptr %ref.tmp169, align 4
+  store float 0x3FC99999A0000000, ptr %ref.tmp170, align 4
+  store float 0.000000e+00, ptr %ref.tmp171, align 4
+  %call172 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp168, ptr %ref.tmp169, ptr %ref.tmp170, ptr %ref.tmp171)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp167, ptr %scale.addr, ptr %ref.tmp168)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %transform, ptr %ref.tmp167)
+  call void @_ZNK11btTransformmlERKS_(ptr sret(%class.btTransform) %ref.tmp173, ptr %offset, ptr %transform)
+  %m_shapes174 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx175 = getelementptr inbounds [11 x ptr], ptr %m_shapes174, i32 0, i32 6
+  %30 = load ptr, ptr %arrayidx175, align 4
+  %call176 = call ptr @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(ptr %this1, float 1.000000e+00, ptr %ref.tmp173, ptr %30)
+  %m_bodies177 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx178 = getelementptr inbounds [11 x ptr], ptr %m_bodies177, i32 0, i32 6
+  store ptr %call176, ptr %arrayidx178, align 4
+  call void @_ZN11btTransform11setIdentityEv(ptr %transform)
+  store float 0xBFD6666660000000, ptr %ref.tmp181, align 4
+  store float 0x3FF7333340000000, ptr %ref.tmp182, align 4
+  store float 0.000000e+00, ptr %ref.tmp183, align 4
+  %call184 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp180, ptr %ref.tmp181, ptr %ref.tmp182, ptr %ref.tmp183)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp179, ptr %scale.addr, ptr %ref.tmp180)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %transform, ptr %ref.tmp179)
+  %call185 = call ptr @_ZN11btTransform8getBasisEv(ptr %transform)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call185, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
+  call void @_ZNK11btTransformmlERKS_(ptr sret(%class.btTransform) %ref.tmp186, ptr %offset, ptr %transform)
+  %m_shapes187 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx188 = getelementptr inbounds [11 x ptr], ptr %m_shapes187, i32 0, i32 7
+  %31 = load ptr, ptr %arrayidx188, align 4
+  %call189 = call ptr @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(ptr %this1, float 1.000000e+00, ptr %ref.tmp186, ptr %31)
+  %m_bodies190 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx191 = getelementptr inbounds [11 x ptr], ptr %m_bodies190, i32 0, i32 7
+  store ptr %call189, ptr %arrayidx191, align 4
+  call void @_ZN11btTransform11setIdentityEv(ptr %transform)
+  store float 0xBFE6666660000000, ptr %ref.tmp194, align 4
+  store float 0x3FF7333340000000, ptr %ref.tmp195, align 4
+  store float 0.000000e+00, ptr %ref.tmp196, align 4
+  %call197 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp193, ptr %ref.tmp194, ptr %ref.tmp195, ptr %ref.tmp196)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp192, ptr %scale.addr, ptr %ref.tmp193)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %transform, ptr %ref.tmp192)
+  %call198 = call ptr @_ZN11btTransform8getBasisEv(ptr %transform)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call198, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
+  call void @_ZNK11btTransformmlERKS_(ptr sret(%class.btTransform) %ref.tmp199, ptr %offset, ptr %transform)
+  %m_shapes200 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx201 = getelementptr inbounds [11 x ptr], ptr %m_shapes200, i32 0, i32 8
+  %32 = load ptr, ptr %arrayidx201, align 4
+  %call202 = call ptr @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(ptr %this1, float 1.000000e+00, ptr %ref.tmp199, ptr %32)
+  %m_bodies203 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx204 = getelementptr inbounds [11 x ptr], ptr %m_bodies203, i32 0, i32 8
+  store ptr %call202, ptr %arrayidx204, align 4
+  call void @_ZN11btTransform11setIdentityEv(ptr %transform)
+  store float 0x3FD6666660000000, ptr %ref.tmp207, align 4
+  store float 0x3FF7333340000000, ptr %ref.tmp208, align 4
+  store float 0.000000e+00, ptr %ref.tmp209, align 4
+  %call210 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp206, ptr %ref.tmp207, ptr %ref.tmp208, ptr %ref.tmp209)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp205, ptr %scale.addr, ptr %ref.tmp206)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %transform, ptr %ref.tmp205)
+  %call211 = call ptr @_ZN11btTransform8getBasisEv(ptr %transform)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call211, float 0.000000e+00, float 0.000000e+00, float 0xBFF921FB60000000)
+  call void @_ZNK11btTransformmlERKS_(ptr sret(%class.btTransform) %ref.tmp212, ptr %offset, ptr %transform)
+  %m_shapes213 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx214 = getelementptr inbounds [11 x ptr], ptr %m_shapes213, i32 0, i32 9
+  %33 = load ptr, ptr %arrayidx214, align 4
+  %call215 = call ptr @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(ptr %this1, float 1.000000e+00, ptr %ref.tmp212, ptr %33)
+  %m_bodies216 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx217 = getelementptr inbounds [11 x ptr], ptr %m_bodies216, i32 0, i32 9
+  store ptr %call215, ptr %arrayidx217, align 4
+  call void @_ZN11btTransform11setIdentityEv(ptr %transform)
+  store float 0x3FE6666660000000, ptr %ref.tmp220, align 4
+  store float 0x3FF7333340000000, ptr %ref.tmp221, align 4
+  store float 0.000000e+00, ptr %ref.tmp222, align 4
+  %call223 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp219, ptr %ref.tmp220, ptr %ref.tmp221, ptr %ref.tmp222)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp218, ptr %scale.addr, ptr %ref.tmp219)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %transform, ptr %ref.tmp218)
+  %call224 = call ptr @_ZN11btTransform8getBasisEv(ptr %transform)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call224, float 0.000000e+00, float 0.000000e+00, float 0xBFF921FB60000000)
+  call void @_ZNK11btTransformmlERKS_(ptr sret(%class.btTransform) %ref.tmp225, ptr %offset, ptr %transform)
+  %m_shapes226 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 2
+  %arrayidx227 = getelementptr inbounds [11 x ptr], ptr %m_shapes226, i32 0, i32 10
+  %34 = load ptr, ptr %arrayidx227, align 4
+  %call228 = call ptr @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(ptr %this1, float 1.000000e+00, ptr %ref.tmp225, ptr %34)
+  %m_bodies229 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx230 = getelementptr inbounds [11 x ptr], ptr %m_bodies229, i32 0, i32 10
+  store ptr %call228, ptr %arrayidx230, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %invoke.cont90
-  %58 = load i32, i32* %i, align 4
-  %cmp = icmp slt i32 %58, 11
+  %35 = load i32, ptr %i, align 4
+  %cmp = icmp slt i32 %35, 11
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %59 = load i32, i32* %i, align 4
-  %m_bodies231 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx232 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies231, i32 0, i32 %59
-  %60 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx232, align 4
-  call void @_ZN11btRigidBody10setDampingEff(%class.btRigidBody* %60, float 0x3FA99999A0000000, float 0x3FEB333340000000)
-  %61 = load i32, i32* %i, align 4
-  %m_bodies233 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx234 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies233, i32 0, i32 %61
-  %62 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx234, align 4
-  %63 = bitcast %class.btRigidBody* %62 to %class.btCollisionObject*
-  call void @_ZN17btCollisionObject19setDeactivationTimeEf(%class.btCollisionObject* %63, float 0x3FE99999A0000000)
-  %64 = load i32, i32* %i, align 4
-  %m_bodies235 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx236 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies235, i32 0, i32 %64
-  %65 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx236, align 4
-  call void @_ZN11btRigidBody21setSleepingThresholdsEff(%class.btRigidBody* %65, float 0x3FF99999A0000000, float 2.500000e+00)
+  %36 = load i32, ptr %i, align 4
+  %m_bodies231 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx232 = getelementptr inbounds [11 x ptr], ptr %m_bodies231, i32 0, i32 %36
+  %37 = load ptr, ptr %arrayidx232, align 4
+  call void @_ZN11btRigidBody10setDampingEff(ptr %37, float 0x3FA99999A0000000, float 0x3FEB333340000000)
+  %38 = load i32, ptr %i, align 4
+  %m_bodies233 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx234 = getelementptr inbounds [11 x ptr], ptr %m_bodies233, i32 0, i32 %38
+  %39 = load ptr, ptr %arrayidx234, align 4
+  call void @_ZN17btCollisionObject19setDeactivationTimeEf(ptr %39, float 0x3FE99999A0000000)
+  %40 = load i32, ptr %i, align 4
+  %m_bodies235 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx236 = getelementptr inbounds [11 x ptr], ptr %m_bodies235, i32 0, i32 %40
+  %41 = load ptr, ptr %arrayidx236, align 4
+  call void @_ZN11btRigidBody21setSleepingThresholdsEff(ptr %41, float 0x3FF99999A0000000, float 2.500000e+00)
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body
-  %66 = load i32, i32* %i, align 4
-  %inc = add nsw i32 %66, 1
-  store i32 %inc, i32* %i, align 4
+  %42 = load i32, ptr %i, align 4
+  %inc = add nsw i32 %42, 1
+  store i32 %inc, ptr %i, align 4
   br label %for.cond
 
 lpad:                                             ; preds = %entry
-  %67 = landingpad { i8*, i32 }
+  %43 = landingpad { ptr, i32 }
           cleanup
-  %68 = extractvalue { i8*, i32 } %67, 0
-  store i8* %68, i8** %exn.slot
-  %69 = extractvalue { i8*, i32 } %67, 1
-  store i32 %69, i32* %ehselector.slot
-  invoke void @_ZN13btConvexShapedlEPv(i8* %call)
+  %44 = extractvalue { ptr, i32 } %43, 0
+  store ptr %44, ptr %exn.slot
+  %45 = extractvalue { ptr, i32 } %43, 1
+  store i32 %45, ptr %ehselector.slot
+  invoke void @_ZN13btConvexShapedlEPv(ptr %call)
           to label %invoke.cont4 unwind label %terminate.lpad
 
 invoke.cont4:                                     ; preds = %lpad
   br label %eh.resume
 
 lpad8:                                            ; preds = %invoke.cont
-  %70 = landingpad { i8*, i32 }
+  %46 = landingpad { ptr, i32 }
           cleanup
-  %71 = extractvalue { i8*, i32 } %70, 0
-  store i8* %71, i8** %exn.slot
-  %72 = extractvalue { i8*, i32 } %70, 1
-  store i32 %72, i32* %ehselector.slot
-  invoke void @_ZN13btConvexShapedlEPv(i8* %call5)
+  %47 = extractvalue { ptr, i32 } %46, 0
+  store ptr %47, ptr %exn.slot
+  %48 = extractvalue { ptr, i32 } %46, 1
+  store i32 %48, ptr %ehselector.slot
+  invoke void @_ZN13btConvexShapedlEPv(ptr %call5)
           to label %invoke.cont11 unwind label %terminate.lpad
 
 invoke.cont11:                                    ; preds = %lpad8
   br label %eh.resume
 
 lpad17:                                           ; preds = %invoke.cont9
-  %73 = landingpad { i8*, i32 }
+  %49 = landingpad { ptr, i32 }
           cleanup
-  %74 = extractvalue { i8*, i32 } %73, 0
-  store i8* %74, i8** %exn.slot
-  %75 = extractvalue { i8*, i32 } %73, 1
-  store i32 %75, i32* %ehselector.slot
-  invoke void @_ZN13btConvexShapedlEPv(i8* %call14)
+  %50 = extractvalue { ptr, i32 } %49, 0
+  store ptr %50, ptr %exn.slot
+  %51 = extractvalue { ptr, i32 } %49, 1
+  store i32 %51, ptr %ehselector.slot
+  invoke void @_ZN13btConvexShapedlEPv(ptr %call14)
           to label %invoke.cont20 unwind label %terminate.lpad
 
 invoke.cont20:                                    ; preds = %lpad17
   br label %eh.resume
 
 lpad26:                                           ; preds = %invoke.cont18
-  %76 = landingpad { i8*, i32 }
+  %52 = landingpad { ptr, i32 }
           cleanup
-  %77 = extractvalue { i8*, i32 } %76, 0
-  store i8* %77, i8** %exn.slot
-  %78 = extractvalue { i8*, i32 } %76, 1
-  store i32 %78, i32* %ehselector.slot
-  invoke void @_ZN13btConvexShapedlEPv(i8* %call23)
+  %53 = extractvalue { ptr, i32 } %52, 0
+  store ptr %53, ptr %exn.slot
+  %54 = extractvalue { ptr, i32 } %52, 1
+  store i32 %54, ptr %ehselector.slot
+  invoke void @_ZN13btConvexShapedlEPv(ptr %call23)
           to label %invoke.cont29 unwind label %terminate.lpad
 
 invoke.cont29:                                    ; preds = %lpad26
   br label %eh.resume
 
 lpad35:                                           ; preds = %invoke.cont27
-  %79 = landingpad { i8*, i32 }
+  %55 = landingpad { ptr, i32 }
           cleanup
-  %80 = extractvalue { i8*, i32 } %79, 0
-  store i8* %80, i8** %exn.slot
-  %81 = extractvalue { i8*, i32 } %79, 1
-  store i32 %81, i32* %ehselector.slot
-  invoke void @_ZN13btConvexShapedlEPv(i8* %call32)
+  %56 = extractvalue { ptr, i32 } %55, 0
+  store ptr %56, ptr %exn.slot
+  %57 = extractvalue { ptr, i32 } %55, 1
+  store i32 %57, ptr %ehselector.slot
+  invoke void @_ZN13btConvexShapedlEPv(ptr %call32)
           to label %invoke.cont38 unwind label %terminate.lpad
 
 invoke.cont38:                                    ; preds = %lpad35
   br label %eh.resume
 
 lpad44:                                           ; preds = %invoke.cont36
-  %82 = landingpad { i8*, i32 }
+  %58 = landingpad { ptr, i32 }
           cleanup
-  %83 = extractvalue { i8*, i32 } %82, 0
-  store i8* %83, i8** %exn.slot
-  %84 = extractvalue { i8*, i32 } %82, 1
-  store i32 %84, i32* %ehselector.slot
-  invoke void @_ZN13btConvexShapedlEPv(i8* %call41)
+  %59 = extractvalue { ptr, i32 } %58, 0
+  store ptr %59, ptr %exn.slot
+  %60 = extractvalue { ptr, i32 } %58, 1
+  store i32 %60, ptr %ehselector.slot
+  invoke void @_ZN13btConvexShapedlEPv(ptr %call41)
           to label %invoke.cont47 unwind label %terminate.lpad
 
 invoke.cont47:                                    ; preds = %lpad44
   br label %eh.resume
 
 lpad53:                                           ; preds = %invoke.cont45
-  %85 = landingpad { i8*, i32 }
+  %61 = landingpad { ptr, i32 }
           cleanup
-  %86 = extractvalue { i8*, i32 } %85, 0
-  store i8* %86, i8** %exn.slot
-  %87 = extractvalue { i8*, i32 } %85, 1
-  store i32 %87, i32* %ehselector.slot
-  invoke void @_ZN13btConvexShapedlEPv(i8* %call50)
+  %62 = extractvalue { ptr, i32 } %61, 0
+  store ptr %62, ptr %exn.slot
+  %63 = extractvalue { ptr, i32 } %61, 1
+  store i32 %63, ptr %ehselector.slot
+  invoke void @_ZN13btConvexShapedlEPv(ptr %call50)
           to label %invoke.cont56 unwind label %terminate.lpad
 
 invoke.cont56:                                    ; preds = %lpad53
   br label %eh.resume
 
 lpad62:                                           ; preds = %invoke.cont54
-  %88 = landingpad { i8*, i32 }
+  %64 = landingpad { ptr, i32 }
           cleanup
-  %89 = extractvalue { i8*, i32 } %88, 0
-  store i8* %89, i8** %exn.slot
-  %90 = extractvalue { i8*, i32 } %88, 1
-  store i32 %90, i32* %ehselector.slot
-  invoke void @_ZN13btConvexShapedlEPv(i8* %call59)
+  %65 = extractvalue { ptr, i32 } %64, 0
+  store ptr %65, ptr %exn.slot
+  %66 = extractvalue { ptr, i32 } %64, 1
+  store i32 %66, ptr %ehselector.slot
+  invoke void @_ZN13btConvexShapedlEPv(ptr %call59)
           to label %invoke.cont65 unwind label %terminate.lpad
 
 invoke.cont65:                                    ; preds = %lpad62
   br label %eh.resume
 
 lpad71:                                           ; preds = %invoke.cont63
-  %91 = landingpad { i8*, i32 }
+  %67 = landingpad { ptr, i32 }
           cleanup
-  %92 = extractvalue { i8*, i32 } %91, 0
-  store i8* %92, i8** %exn.slot
-  %93 = extractvalue { i8*, i32 } %91, 1
-  store i32 %93, i32* %ehselector.slot
-  invoke void @_ZN13btConvexShapedlEPv(i8* %call68)
+  %68 = extractvalue { ptr, i32 } %67, 0
+  store ptr %68, ptr %exn.slot
+  %69 = extractvalue { ptr, i32 } %67, 1
+  store i32 %69, ptr %ehselector.slot
+  invoke void @_ZN13btConvexShapedlEPv(ptr %call68)
           to label %invoke.cont74 unwind label %terminate.lpad
 
 invoke.cont74:                                    ; preds = %lpad71
   br label %eh.resume
 
 lpad80:                                           ; preds = %invoke.cont72
-  %94 = landingpad { i8*, i32 }
+  %70 = landingpad { ptr, i32 }
           cleanup
-  %95 = extractvalue { i8*, i32 } %94, 0
-  store i8* %95, i8** %exn.slot
-  %96 = extractvalue { i8*, i32 } %94, 1
-  store i32 %96, i32* %ehselector.slot
-  invoke void @_ZN13btConvexShapedlEPv(i8* %call77)
+  %71 = extractvalue { ptr, i32 } %70, 0
+  store ptr %71, ptr %exn.slot
+  %72 = extractvalue { ptr, i32 } %70, 1
+  store i32 %72, ptr %ehselector.slot
+  invoke void @_ZN13btConvexShapedlEPv(ptr %call77)
           to label %invoke.cont83 unwind label %terminate.lpad
 
 invoke.cont83:                                    ; preds = %lpad80
   br label %eh.resume
 
 lpad89:                                           ; preds = %invoke.cont81
-  %97 = landingpad { i8*, i32 }
+  %73 = landingpad { ptr, i32 }
           cleanup
-  %98 = extractvalue { i8*, i32 } %97, 0
-  store i8* %98, i8** %exn.slot
-  %99 = extractvalue { i8*, i32 } %97, 1
-  store i32 %99, i32* %ehselector.slot
-  invoke void @_ZN13btConvexShapedlEPv(i8* %call86)
+  %74 = extractvalue { ptr, i32 } %73, 0
+  store ptr %74, ptr %exn.slot
+  %75 = extractvalue { ptr, i32 } %73, 1
+  store i32 %75, ptr %ehselector.slot
+  invoke void @_ZN13btConvexShapedlEPv(ptr %call86)
           to label %invoke.cont92 unwind label %terminate.lpad
 
 invoke.cont92:                                    ; preds = %lpad89
   br label %eh.resume
 
 for.end:                                          ; preds = %for.cond
-  %call237 = call %class.btTransform* @_ZN11btTransformC1Ev(%class.btTransform* %localA)
-  %call238 = call %class.btTransform* @_ZN11btTransformC1Ev(%class.btTransform* %localB)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localB)
-  %call239 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localA)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call239, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
-  store float 0.000000e+00, float* %ref.tmp242, align 4
-  store float 0x3FC3333340000000, float* %ref.tmp243, align 4
-  store float 0.000000e+00, float* %ref.tmp244, align 4
-  %call245 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp241, float* %ref.tmp242, float* %ref.tmp243, float* %ref.tmp244)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp240, float* %scale.addr, %class.btVector3* %ref.tmp241)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localA, %class.btVector3* %ref.tmp240)
-  %call246 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localB)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call246, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
-  store float 0.000000e+00, float* %ref.tmp249, align 4
-  store float 0xBFC3333340000000, float* %ref.tmp250, align 4
-  store float 0.000000e+00, float* %ref.tmp251, align 4
-  %call252 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp248, float* %ref.tmp249, float* %ref.tmp250, float* %ref.tmp251)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp247, float* %scale.addr, %class.btVector3* %ref.tmp248)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp247)
-  %call253 = call noalias i8* @_Znwm(i32 780)
-  %100 = bitcast i8* %call253 to %class.btHingeConstraint*
-  %m_bodies254 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx255 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies254, i32 0, i32 0
-  %101 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx255, align 4
-  %m_bodies256 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx257 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies256, i32 0, i32 1
-  %102 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx257, align 4
-  %call260 = invoke %class.btHingeConstraint* @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(%class.btHingeConstraint* %100, %class.btRigidBody* %101, %class.btRigidBody* %102, %class.btTransform* %localA, %class.btTransform* %localB, i1 zeroext false)
+  %call237 = call ptr @_ZN11btTransformC1Ev(ptr %localA)
+  %call238 = call ptr @_ZN11btTransformC1Ev(ptr %localB)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localA)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localB)
+  %call239 = call ptr @_ZN11btTransform8getBasisEv(ptr %localA)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call239, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
+  store float 0.000000e+00, ptr %ref.tmp242, align 4
+  store float 0x3FC3333340000000, ptr %ref.tmp243, align 4
+  store float 0.000000e+00, ptr %ref.tmp244, align 4
+  %call245 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp241, ptr %ref.tmp242, ptr %ref.tmp243, ptr %ref.tmp244)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp240, ptr %scale.addr, ptr %ref.tmp241)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localA, ptr %ref.tmp240)
+  %call246 = call ptr @_ZN11btTransform8getBasisEv(ptr %localB)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call246, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
+  store float 0.000000e+00, ptr %ref.tmp249, align 4
+  store float 0xBFC3333340000000, ptr %ref.tmp250, align 4
+  store float 0.000000e+00, ptr %ref.tmp251, align 4
+  %call252 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp248, ptr %ref.tmp249, ptr %ref.tmp250, ptr %ref.tmp251)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp247, ptr %scale.addr, ptr %ref.tmp248)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localB, ptr %ref.tmp247)
+  %call253 = call noalias ptr @_Znwm(i32 780)
+  %m_bodies254 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %76 = load ptr, ptr %m_bodies254, align 4
+  %m_bodies256 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx257 = getelementptr inbounds [11 x ptr], ptr %m_bodies256, i32 0, i32 1
+  %77 = load ptr, ptr %arrayidx257, align 4
+  %call260 = invoke ptr @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(ptr %call253, ptr %76, ptr %77, ptr %localA, ptr %localB, i1 zeroext false)
           to label %invoke.cont259 unwind label %lpad258
 
 invoke.cont259:                                   ; preds = %for.end
-  store %class.btHingeConstraint* %100, %class.btHingeConstraint** %hingeC, align 4
-  %103 = load %class.btHingeConstraint*, %class.btHingeConstraint** %hingeC, align 4
-  call void @_ZN17btHingeConstraint8setLimitEfffff(%class.btHingeConstraint* %103, float 0xBFE921FB60000000, float 0x3FF921FB60000000, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
-  %104 = load %class.btHingeConstraint*, %class.btHingeConstraint** %hingeC, align 4
-  %105 = bitcast %class.btHingeConstraint* %104 to %class.btTypedConstraint*
-  %m_joints = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx261 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints, i32 0, i32 0
-  store %class.btTypedConstraint* %105, %class.btTypedConstraint** %arrayidx261, align 4
-  %m_ownerWorld262 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
-  %106 = load %class.btDynamicsWorld*, %class.btDynamicsWorld** %m_ownerWorld262, align 4
-  %107 = bitcast %class.btDynamicsWorld* %106 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
-  %vtable = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)**, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %107
-  %vfn = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable, i64 10
-  %108 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn
-  %m_joints263 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx264 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints263, i32 0, i32 0
-  %109 = load %class.btTypedConstraint*, %class.btTypedConstraint** %arrayidx264, align 4
-  call void %108(%class.btDynamicsWorld* %106, %class.btTypedConstraint* %109, i1 zeroext true)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localB)
-  %call265 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localA)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call265, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
-  store float 0.000000e+00, float* %ref.tmp268, align 4
-  store float 0x3FD3333340000000, float* %ref.tmp269, align 4
-  store float 0.000000e+00, float* %ref.tmp270, align 4
-  %call271 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp267, float* %ref.tmp268, float* %ref.tmp269, float* %ref.tmp270)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp266, float* %scale.addr, %class.btVector3* %ref.tmp267)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localA, %class.btVector3* %ref.tmp266)
-  %call272 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localB)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call272, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
-  store float 0.000000e+00, float* %ref.tmp275, align 4
-  store float 0xBFC1EB8520000000, float* %ref.tmp276, align 4
-  store float 0.000000e+00, float* %ref.tmp277, align 4
-  %call278 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp274, float* %ref.tmp275, float* %ref.tmp276, float* %ref.tmp277)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp273, float* %scale.addr, %class.btVector3* %ref.tmp274)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp273)
-  %call279 = call noalias i8* @_Znwm(i32 628)
-  %110 = bitcast i8* %call279 to %class.btConeTwistConstraint*
-  %m_bodies280 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx281 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies280, i32 0, i32 1
-  %111 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx281, align 4
-  %m_bodies282 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx283 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies282, i32 0, i32 2
-  %112 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx283, align 4
-  %call286 = invoke %class.btConeTwistConstraint* @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(%class.btConeTwistConstraint* %110, %class.btRigidBody* %111, %class.btRigidBody* %112, %class.btTransform* %localA, %class.btTransform* %localB)
+  store ptr %call253, ptr %hingeC, align 4
+  %78 = load ptr, ptr %hingeC, align 4
+  call void @_ZN17btHingeConstraint8setLimitEfffff(ptr %78, float 0xBFE921FB60000000, float 0x3FF921FB60000000, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
+  %79 = load ptr, ptr %hingeC, align 4
+  %m_joints = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  store ptr %79, ptr %m_joints, align 4
+  %m_ownerWorld262 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 1
+  %80 = load ptr, ptr %m_ownerWorld262, align 4
+  %vtable = load ptr, ptr %80
+  %vfn = getelementptr inbounds ptr, ptr %vtable, i64 10
+  %81 = load ptr, ptr %vfn
+  %m_joints263 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %82 = load ptr, ptr %m_joints263, align 4
+  call void %81(ptr %80, ptr %82, i1 zeroext true)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localA)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localB)
+  %call265 = call ptr @_ZN11btTransform8getBasisEv(ptr %localA)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call265, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
+  store float 0.000000e+00, ptr %ref.tmp268, align 4
+  store float 0x3FD3333340000000, ptr %ref.tmp269, align 4
+  store float 0.000000e+00, ptr %ref.tmp270, align 4
+  %call271 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp267, ptr %ref.tmp268, ptr %ref.tmp269, ptr %ref.tmp270)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp266, ptr %scale.addr, ptr %ref.tmp267)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localA, ptr %ref.tmp266)
+  %call272 = call ptr @_ZN11btTransform8getBasisEv(ptr %localB)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call272, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
+  store float 0.000000e+00, ptr %ref.tmp275, align 4
+  store float 0xBFC1EB8520000000, ptr %ref.tmp276, align 4
+  store float 0.000000e+00, ptr %ref.tmp277, align 4
+  %call278 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp274, ptr %ref.tmp275, ptr %ref.tmp276, ptr %ref.tmp277)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp273, ptr %scale.addr, ptr %ref.tmp274)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localB, ptr %ref.tmp273)
+  %call279 = call noalias ptr @_Znwm(i32 628)
+  %m_bodies280 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx281 = getelementptr inbounds [11 x ptr], ptr %m_bodies280, i32 0, i32 1
+  %83 = load ptr, ptr %arrayidx281, align 4
+  %m_bodies282 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx283 = getelementptr inbounds [11 x ptr], ptr %m_bodies282, i32 0, i32 2
+  %84 = load ptr, ptr %arrayidx283, align 4
+  %call286 = invoke ptr @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(ptr %call279, ptr %83, ptr %84, ptr %localA, ptr %localB)
           to label %invoke.cont285 unwind label %lpad284
 
 invoke.cont285:                                   ; preds = %invoke.cont259
-  store %class.btConeTwistConstraint* %110, %class.btConeTwistConstraint** %coneC, align 4
-  %113 = load %class.btConeTwistConstraint*, %class.btConeTwistConstraint** %coneC, align 4
-  call void @_ZN21btConeTwistConstraint8setLimitEffffff(%class.btConeTwistConstraint* %113, float 0x3FE921FB60000000, float 0x3FE921FB60000000, float 0x3FF921FB60000000, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
-  %114 = load %class.btConeTwistConstraint*, %class.btConeTwistConstraint** %coneC, align 4
-  %115 = bitcast %class.btConeTwistConstraint* %114 to %class.btTypedConstraint*
-  %m_joints287 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx288 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints287, i32 0, i32 1
-  store %class.btTypedConstraint* %115, %class.btTypedConstraint** %arrayidx288, align 4
-  %m_ownerWorld289 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
-  %116 = load %class.btDynamicsWorld*, %class.btDynamicsWorld** %m_ownerWorld289, align 4
-  %117 = bitcast %class.btDynamicsWorld* %116 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
-  %vtable290 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)**, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %117
-  %vfn291 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable290, i64 10
-  %118 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn291
-  %m_joints292 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx293 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints292, i32 0, i32 1
-  %119 = load %class.btTypedConstraint*, %class.btTypedConstraint** %arrayidx293, align 4
-  call void %118(%class.btDynamicsWorld* %116, %class.btTypedConstraint* %119, i1 zeroext true)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localB)
-  %call294 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localA)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call294, float 0.000000e+00, float 0.000000e+00, float 0xC00F6A7A20000000)
-  store float 0xBFC70A3D80000000, float* %ref.tmp297, align 4
-  store float 0xBFB99999A0000000, float* %ref.tmp298, align 4
-  store float 0.000000e+00, float* %ref.tmp299, align 4
-  %call300 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp296, float* %ref.tmp297, float* %ref.tmp298, float* %ref.tmp299)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp295, float* %scale.addr, %class.btVector3* %ref.tmp296)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localA, %class.btVector3* %ref.tmp295)
-  %call301 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localB)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call301, float 0.000000e+00, float 0.000000e+00, float 0xC00F6A7A20000000)
-  store float 0.000000e+00, float* %ref.tmp304, align 4
-  store float 0x3FCCCCCCC0000000, float* %ref.tmp305, align 4
-  store float 0.000000e+00, float* %ref.tmp306, align 4
-  %call307 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp303, float* %ref.tmp304, float* %ref.tmp305, float* %ref.tmp306)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp302, float* %scale.addr, %class.btVector3* %ref.tmp303)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp302)
-  %call308 = call noalias i8* @_Znwm(i32 628)
-  %120 = bitcast i8* %call308 to %class.btConeTwistConstraint*
-  %m_bodies309 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx310 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies309, i32 0, i32 0
-  %121 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx310, align 4
-  %m_bodies311 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx312 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies311, i32 0, i32 3
-  %122 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx312, align 4
-  %call315 = invoke %class.btConeTwistConstraint* @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(%class.btConeTwistConstraint* %120, %class.btRigidBody* %121, %class.btRigidBody* %122, %class.btTransform* %localA, %class.btTransform* %localB)
+  store ptr %call279, ptr %coneC, align 4
+  %85 = load ptr, ptr %coneC, align 4
+  call void @_ZN21btConeTwistConstraint8setLimitEffffff(ptr %85, float 0x3FE921FB60000000, float 0x3FE921FB60000000, float 0x3FF921FB60000000, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
+  %86 = load ptr, ptr %coneC, align 4
+  %m_joints287 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx288 = getelementptr inbounds [10 x ptr], ptr %m_joints287, i32 0, i32 1
+  store ptr %86, ptr %arrayidx288, align 4
+  %m_ownerWorld289 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 1
+  %87 = load ptr, ptr %m_ownerWorld289, align 4
+  %vtable290 = load ptr, ptr %87
+  %vfn291 = getelementptr inbounds ptr, ptr %vtable290, i64 10
+  %88 = load ptr, ptr %vfn291
+  %m_joints292 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx293 = getelementptr inbounds [10 x ptr], ptr %m_joints292, i32 0, i32 1
+  %89 = load ptr, ptr %arrayidx293, align 4
+  call void %88(ptr %87, ptr %89, i1 zeroext true)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localA)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localB)
+  %call294 = call ptr @_ZN11btTransform8getBasisEv(ptr %localA)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call294, float 0.000000e+00, float 0.000000e+00, float 0xC00F6A7A20000000)
+  store float 0xBFC70A3D80000000, ptr %ref.tmp297, align 4
+  store float 0xBFB99999A0000000, ptr %ref.tmp298, align 4
+  store float 0.000000e+00, ptr %ref.tmp299, align 4
+  %call300 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp296, ptr %ref.tmp297, ptr %ref.tmp298, ptr %ref.tmp299)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp295, ptr %scale.addr, ptr %ref.tmp296)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localA, ptr %ref.tmp295)
+  %call301 = call ptr @_ZN11btTransform8getBasisEv(ptr %localB)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call301, float 0.000000e+00, float 0.000000e+00, float 0xC00F6A7A20000000)
+  store float 0.000000e+00, ptr %ref.tmp304, align 4
+  store float 0x3FCCCCCCC0000000, ptr %ref.tmp305, align 4
+  store float 0.000000e+00, ptr %ref.tmp306, align 4
+  %call307 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp303, ptr %ref.tmp304, ptr %ref.tmp305, ptr %ref.tmp306)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp302, ptr %scale.addr, ptr %ref.tmp303)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localB, ptr %ref.tmp302)
+  %call308 = call noalias ptr @_Znwm(i32 628)
+  %m_bodies309 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %90 = load ptr, ptr %m_bodies309, align 4
+  %m_bodies311 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx312 = getelementptr inbounds [11 x ptr], ptr %m_bodies311, i32 0, i32 3
+  %91 = load ptr, ptr %arrayidx312, align 4
+  %call315 = invoke ptr @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(ptr %call308, ptr %90, ptr %91, ptr %localA, ptr %localB)
           to label %invoke.cont314 unwind label %lpad313
 
 invoke.cont314:                                   ; preds = %invoke.cont285
-  store %class.btConeTwistConstraint* %120, %class.btConeTwistConstraint** %coneC, align 4
-  %123 = load %class.btConeTwistConstraint*, %class.btConeTwistConstraint** %coneC, align 4
-  call void @_ZN21btConeTwistConstraint8setLimitEffffff(%class.btConeTwistConstraint* %123, float 0x3FE921FB60000000, float 0x3FE921FB60000000, float 0.000000e+00, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
-  %124 = load %class.btConeTwistConstraint*, %class.btConeTwistConstraint** %coneC, align 4
-  %125 = bitcast %class.btConeTwistConstraint* %124 to %class.btTypedConstraint*
-  %m_joints316 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx317 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints316, i32 0, i32 2
-  store %class.btTypedConstraint* %125, %class.btTypedConstraint** %arrayidx317, align 4
-  %m_ownerWorld318 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
-  %126 = load %class.btDynamicsWorld*, %class.btDynamicsWorld** %m_ownerWorld318, align 4
-  %127 = bitcast %class.btDynamicsWorld* %126 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
-  %vtable319 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)**, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %127
-  %vfn320 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable319, i64 10
-  %128 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn320
-  %m_joints321 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx322 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints321, i32 0, i32 2
-  %129 = load %class.btTypedConstraint*, %class.btTypedConstraint** %arrayidx322, align 4
-  call void %128(%class.btDynamicsWorld* %126, %class.btTypedConstraint* %129, i1 zeroext true)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localB)
-  %call323 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localA)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call323, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
-  store float 0.000000e+00, float* %ref.tmp326, align 4
-  store float 0xBFCCCCCCC0000000, float* %ref.tmp327, align 4
-  store float 0.000000e+00, float* %ref.tmp328, align 4
-  %call329 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp325, float* %ref.tmp326, float* %ref.tmp327, float* %ref.tmp328)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp324, float* %scale.addr, %class.btVector3* %ref.tmp325)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localA, %class.btVector3* %ref.tmp324)
-  %call330 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localB)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call330, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
-  store float 0.000000e+00, float* %ref.tmp333, align 4
-  store float 0x3FC7AE1480000000, float* %ref.tmp334, align 4
-  store float 0.000000e+00, float* %ref.tmp335, align 4
-  %call336 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp332, float* %ref.tmp333, float* %ref.tmp334, float* %ref.tmp335)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp331, float* %scale.addr, %class.btVector3* %ref.tmp332)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp331)
-  %call337 = call noalias i8* @_Znwm(i32 780)
-  %130 = bitcast i8* %call337 to %class.btHingeConstraint*
-  %m_bodies338 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx339 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies338, i32 0, i32 3
-  %131 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx339, align 4
-  %m_bodies340 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx341 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies340, i32 0, i32 4
-  %132 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx341, align 4
-  %call344 = invoke %class.btHingeConstraint* @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(%class.btHingeConstraint* %130, %class.btRigidBody* %131, %class.btRigidBody* %132, %class.btTransform* %localA, %class.btTransform* %localB, i1 zeroext false)
+  store ptr %call308, ptr %coneC, align 4
+  %92 = load ptr, ptr %coneC, align 4
+  call void @_ZN21btConeTwistConstraint8setLimitEffffff(ptr %92, float 0x3FE921FB60000000, float 0x3FE921FB60000000, float 0.000000e+00, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
+  %93 = load ptr, ptr %coneC, align 4
+  %m_joints316 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx317 = getelementptr inbounds [10 x ptr], ptr %m_joints316, i32 0, i32 2
+  store ptr %93, ptr %arrayidx317, align 4
+  %m_ownerWorld318 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 1
+  %94 = load ptr, ptr %m_ownerWorld318, align 4
+  %vtable319 = load ptr, ptr %94
+  %vfn320 = getelementptr inbounds ptr, ptr %vtable319, i64 10
+  %95 = load ptr, ptr %vfn320
+  %m_joints321 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx322 = getelementptr inbounds [10 x ptr], ptr %m_joints321, i32 0, i32 2
+  %96 = load ptr, ptr %arrayidx322, align 4
+  call void %95(ptr %94, ptr %96, i1 zeroext true)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localA)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localB)
+  %call323 = call ptr @_ZN11btTransform8getBasisEv(ptr %localA)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call323, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
+  store float 0.000000e+00, ptr %ref.tmp326, align 4
+  store float 0xBFCCCCCCC0000000, ptr %ref.tmp327, align 4
+  store float 0.000000e+00, ptr %ref.tmp328, align 4
+  %call329 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp325, ptr %ref.tmp326, ptr %ref.tmp327, ptr %ref.tmp328)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp324, ptr %scale.addr, ptr %ref.tmp325)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localA, ptr %ref.tmp324)
+  %call330 = call ptr @_ZN11btTransform8getBasisEv(ptr %localB)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call330, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
+  store float 0.000000e+00, ptr %ref.tmp333, align 4
+  store float 0x3FC7AE1480000000, ptr %ref.tmp334, align 4
+  store float 0.000000e+00, ptr %ref.tmp335, align 4
+  %call336 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp332, ptr %ref.tmp333, ptr %ref.tmp334, ptr %ref.tmp335)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp331, ptr %scale.addr, ptr %ref.tmp332)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localB, ptr %ref.tmp331)
+  %call337 = call noalias ptr @_Znwm(i32 780)
+  %m_bodies338 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx339 = getelementptr inbounds [11 x ptr], ptr %m_bodies338, i32 0, i32 3
+  %97 = load ptr, ptr %arrayidx339, align 4
+  %m_bodies340 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx341 = getelementptr inbounds [11 x ptr], ptr %m_bodies340, i32 0, i32 4
+  %98 = load ptr, ptr %arrayidx341, align 4
+  %call344 = invoke ptr @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(ptr %call337, ptr %97, ptr %98, ptr %localA, ptr %localB, i1 zeroext false)
           to label %invoke.cont343 unwind label %lpad342
 
 invoke.cont343:                                   ; preds = %invoke.cont314
-  store %class.btHingeConstraint* %130, %class.btHingeConstraint** %hingeC, align 4
-  %133 = load %class.btHingeConstraint*, %class.btHingeConstraint** %hingeC, align 4
-  call void @_ZN17btHingeConstraint8setLimitEfffff(%class.btHingeConstraint* %133, float 0.000000e+00, float 0x3FF921FB60000000, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
-  %134 = load %class.btHingeConstraint*, %class.btHingeConstraint** %hingeC, align 4
-  %135 = bitcast %class.btHingeConstraint* %134 to %class.btTypedConstraint*
-  %m_joints345 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx346 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints345, i32 0, i32 3
-  store %class.btTypedConstraint* %135, %class.btTypedConstraint** %arrayidx346, align 4
-  %m_ownerWorld347 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
-  %136 = load %class.btDynamicsWorld*, %class.btDynamicsWorld** %m_ownerWorld347, align 4
-  %137 = bitcast %class.btDynamicsWorld* %136 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
-  %vtable348 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)**, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %137
-  %vfn349 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable348, i64 10
-  %138 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn349
-  %m_joints350 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx351 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints350, i32 0, i32 3
-  %139 = load %class.btTypedConstraint*, %class.btTypedConstraint** %arrayidx351, align 4
-  call void %138(%class.btDynamicsWorld* %136, %class.btTypedConstraint* %139, i1 zeroext true)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localB)
-  %call352 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localA)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call352, float 0.000000e+00, float 0.000000e+00, float 0x3FE921FB60000000)
-  store float 0x3FC70A3D80000000, float* %ref.tmp355, align 4
-  store float 0xBFB99999A0000000, float* %ref.tmp356, align 4
-  store float 0.000000e+00, float* %ref.tmp357, align 4
-  %call358 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp354, float* %ref.tmp355, float* %ref.tmp356, float* %ref.tmp357)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp353, float* %scale.addr, %class.btVector3* %ref.tmp354)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localA, %class.btVector3* %ref.tmp353)
-  %call359 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localB)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call359, float 0.000000e+00, float 0.000000e+00, float 0x3FE921FB60000000)
-  store float 0.000000e+00, float* %ref.tmp362, align 4
-  store float 0x3FCCCCCCC0000000, float* %ref.tmp363, align 4
-  store float 0.000000e+00, float* %ref.tmp364, align 4
-  %call365 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp361, float* %ref.tmp362, float* %ref.tmp363, float* %ref.tmp364)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp360, float* %scale.addr, %class.btVector3* %ref.tmp361)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp360)
-  %call366 = call noalias i8* @_Znwm(i32 628)
-  %140 = bitcast i8* %call366 to %class.btConeTwistConstraint*
-  %m_bodies367 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx368 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies367, i32 0, i32 0
-  %141 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx368, align 4
-  %m_bodies369 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx370 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies369, i32 0, i32 5
-  %142 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx370, align 4
-  %call373 = invoke %class.btConeTwistConstraint* @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(%class.btConeTwistConstraint* %140, %class.btRigidBody* %141, %class.btRigidBody* %142, %class.btTransform* %localA, %class.btTransform* %localB)
+  store ptr %call337, ptr %hingeC, align 4
+  %99 = load ptr, ptr %hingeC, align 4
+  call void @_ZN17btHingeConstraint8setLimitEfffff(ptr %99, float 0.000000e+00, float 0x3FF921FB60000000, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
+  %100 = load ptr, ptr %hingeC, align 4
+  %m_joints345 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx346 = getelementptr inbounds [10 x ptr], ptr %m_joints345, i32 0, i32 3
+  store ptr %100, ptr %arrayidx346, align 4
+  %m_ownerWorld347 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 1
+  %101 = load ptr, ptr %m_ownerWorld347, align 4
+  %vtable348 = load ptr, ptr %101
+  %vfn349 = getelementptr inbounds ptr, ptr %vtable348, i64 10
+  %102 = load ptr, ptr %vfn349
+  %m_joints350 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx351 = getelementptr inbounds [10 x ptr], ptr %m_joints350, i32 0, i32 3
+  %103 = load ptr, ptr %arrayidx351, align 4
+  call void %102(ptr %101, ptr %103, i1 zeroext true)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localA)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localB)
+  %call352 = call ptr @_ZN11btTransform8getBasisEv(ptr %localA)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call352, float 0.000000e+00, float 0.000000e+00, float 0x3FE921FB60000000)
+  store float 0x3FC70A3D80000000, ptr %ref.tmp355, align 4
+  store float 0xBFB99999A0000000, ptr %ref.tmp356, align 4
+  store float 0.000000e+00, ptr %ref.tmp357, align 4
+  %call358 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp354, ptr %ref.tmp355, ptr %ref.tmp356, ptr %ref.tmp357)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp353, ptr %scale.addr, ptr %ref.tmp354)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localA, ptr %ref.tmp353)
+  %call359 = call ptr @_ZN11btTransform8getBasisEv(ptr %localB)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call359, float 0.000000e+00, float 0.000000e+00, float 0x3FE921FB60000000)
+  store float 0.000000e+00, ptr %ref.tmp362, align 4
+  store float 0x3FCCCCCCC0000000, ptr %ref.tmp363, align 4
+  store float 0.000000e+00, ptr %ref.tmp364, align 4
+  %call365 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp361, ptr %ref.tmp362, ptr %ref.tmp363, ptr %ref.tmp364)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp360, ptr %scale.addr, ptr %ref.tmp361)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localB, ptr %ref.tmp360)
+  %call366 = call noalias ptr @_Znwm(i32 628)
+  %m_bodies367 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %104 = load ptr, ptr %m_bodies367, align 4
+  %m_bodies369 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx370 = getelementptr inbounds [11 x ptr], ptr %m_bodies369, i32 0, i32 5
+  %105 = load ptr, ptr %arrayidx370, align 4
+  %call373 = invoke ptr @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(ptr %call366, ptr %104, ptr %105, ptr %localA, ptr %localB)
           to label %invoke.cont372 unwind label %lpad371
 
 invoke.cont372:                                   ; preds = %invoke.cont343
-  store %class.btConeTwistConstraint* %140, %class.btConeTwistConstraint** %coneC, align 4
-  %143 = load %class.btConeTwistConstraint*, %class.btConeTwistConstraint** %coneC, align 4
-  call void @_ZN21btConeTwistConstraint8setLimitEffffff(%class.btConeTwistConstraint* %143, float 0x3FE921FB60000000, float 0x3FE921FB60000000, float 0.000000e+00, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
-  %144 = load %class.btConeTwistConstraint*, %class.btConeTwistConstraint** %coneC, align 4
-  %145 = bitcast %class.btConeTwistConstraint* %144 to %class.btTypedConstraint*
-  %m_joints374 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx375 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints374, i32 0, i32 4
-  store %class.btTypedConstraint* %145, %class.btTypedConstraint** %arrayidx375, align 4
-  %m_ownerWorld376 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
-  %146 = load %class.btDynamicsWorld*, %class.btDynamicsWorld** %m_ownerWorld376, align 4
-  %147 = bitcast %class.btDynamicsWorld* %146 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
-  %vtable377 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)**, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %147
-  %vfn378 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable377, i64 10
-  %148 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn378
-  %m_joints379 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx380 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints379, i32 0, i32 4
-  %149 = load %class.btTypedConstraint*, %class.btTypedConstraint** %arrayidx380, align 4
-  call void %148(%class.btDynamicsWorld* %146, %class.btTypedConstraint* %149, i1 zeroext true)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localB)
-  %call381 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localA)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call381, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
-  store float 0.000000e+00, float* %ref.tmp384, align 4
-  store float 0xBFCCCCCCC0000000, float* %ref.tmp385, align 4
-  store float 0.000000e+00, float* %ref.tmp386, align 4
-  %call387 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp383, float* %ref.tmp384, float* %ref.tmp385, float* %ref.tmp386)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp382, float* %scale.addr, %class.btVector3* %ref.tmp383)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localA, %class.btVector3* %ref.tmp382)
-  %call388 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localB)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call388, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
-  store float 0.000000e+00, float* %ref.tmp391, align 4
-  store float 0x3FC7AE1480000000, float* %ref.tmp392, align 4
-  store float 0.000000e+00, float* %ref.tmp393, align 4
-  %call394 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp390, float* %ref.tmp391, float* %ref.tmp392, float* %ref.tmp393)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp389, float* %scale.addr, %class.btVector3* %ref.tmp390)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp389)
-  %call395 = call noalias i8* @_Znwm(i32 780)
-  %150 = bitcast i8* %call395 to %class.btHingeConstraint*
-  %m_bodies396 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx397 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies396, i32 0, i32 5
-  %151 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx397, align 4
-  %m_bodies398 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx399 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies398, i32 0, i32 6
-  %152 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx399, align 4
-  %call402 = invoke %class.btHingeConstraint* @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(%class.btHingeConstraint* %150, %class.btRigidBody* %151, %class.btRigidBody* %152, %class.btTransform* %localA, %class.btTransform* %localB, i1 zeroext false)
+  store ptr %call366, ptr %coneC, align 4
+  %106 = load ptr, ptr %coneC, align 4
+  call void @_ZN21btConeTwistConstraint8setLimitEffffff(ptr %106, float 0x3FE921FB60000000, float 0x3FE921FB60000000, float 0.000000e+00, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
+  %107 = load ptr, ptr %coneC, align 4
+  %m_joints374 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx375 = getelementptr inbounds [10 x ptr], ptr %m_joints374, i32 0, i32 4
+  store ptr %107, ptr %arrayidx375, align 4
+  %m_ownerWorld376 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 1
+  %108 = load ptr, ptr %m_ownerWorld376, align 4
+  %vtable377 = load ptr, ptr %108
+  %vfn378 = getelementptr inbounds ptr, ptr %vtable377, i64 10
+  %109 = load ptr, ptr %vfn378
+  %m_joints379 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx380 = getelementptr inbounds [10 x ptr], ptr %m_joints379, i32 0, i32 4
+  %110 = load ptr, ptr %arrayidx380, align 4
+  call void %109(ptr %108, ptr %110, i1 zeroext true)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localA)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localB)
+  %call381 = call ptr @_ZN11btTransform8getBasisEv(ptr %localA)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call381, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
+  store float 0.000000e+00, ptr %ref.tmp384, align 4
+  store float 0xBFCCCCCCC0000000, ptr %ref.tmp385, align 4
+  store float 0.000000e+00, ptr %ref.tmp386, align 4
+  %call387 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp383, ptr %ref.tmp384, ptr %ref.tmp385, ptr %ref.tmp386)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp382, ptr %scale.addr, ptr %ref.tmp383)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localA, ptr %ref.tmp382)
+  %call388 = call ptr @_ZN11btTransform8getBasisEv(ptr %localB)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call388, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
+  store float 0.000000e+00, ptr %ref.tmp391, align 4
+  store float 0x3FC7AE1480000000, ptr %ref.tmp392, align 4
+  store float 0.000000e+00, ptr %ref.tmp393, align 4
+  %call394 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp390, ptr %ref.tmp391, ptr %ref.tmp392, ptr %ref.tmp393)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp389, ptr %scale.addr, ptr %ref.tmp390)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localB, ptr %ref.tmp389)
+  %call395 = call noalias ptr @_Znwm(i32 780)
+  %m_bodies396 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx397 = getelementptr inbounds [11 x ptr], ptr %m_bodies396, i32 0, i32 5
+  %111 = load ptr, ptr %arrayidx397, align 4
+  %m_bodies398 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx399 = getelementptr inbounds [11 x ptr], ptr %m_bodies398, i32 0, i32 6
+  %112 = load ptr, ptr %arrayidx399, align 4
+  %call402 = invoke ptr @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(ptr %call395, ptr %111, ptr %112, ptr %localA, ptr %localB, i1 zeroext false)
           to label %invoke.cont401 unwind label %lpad400
 
 invoke.cont401:                                   ; preds = %invoke.cont372
-  store %class.btHingeConstraint* %150, %class.btHingeConstraint** %hingeC, align 4
-  %153 = load %class.btHingeConstraint*, %class.btHingeConstraint** %hingeC, align 4
-  call void @_ZN17btHingeConstraint8setLimitEfffff(%class.btHingeConstraint* %153, float 0.000000e+00, float 0x3FF921FB60000000, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
-  %154 = load %class.btHingeConstraint*, %class.btHingeConstraint** %hingeC, align 4
-  %155 = bitcast %class.btHingeConstraint* %154 to %class.btTypedConstraint*
-  %m_joints403 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx404 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints403, i32 0, i32 5
-  store %class.btTypedConstraint* %155, %class.btTypedConstraint** %arrayidx404, align 4
-  %m_ownerWorld405 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
-  %156 = load %class.btDynamicsWorld*, %class.btDynamicsWorld** %m_ownerWorld405, align 4
-  %157 = bitcast %class.btDynamicsWorld* %156 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
-  %vtable406 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)**, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %157
-  %vfn407 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable406, i64 10
-  %158 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn407
-  %m_joints408 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx409 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints408, i32 0, i32 5
-  %159 = load %class.btTypedConstraint*, %class.btTypedConstraint** %arrayidx409, align 4
-  call void %158(%class.btDynamicsWorld* %156, %class.btTypedConstraint* %159, i1 zeroext true)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localB)
-  %call410 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localA)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call410, float 0.000000e+00, float 0.000000e+00, float 0x400921FB60000000)
-  store float 0xBFC99999A0000000, float* %ref.tmp413, align 4
-  store float 0x3FC3333340000000, float* %ref.tmp414, align 4
-  store float 0.000000e+00, float* %ref.tmp415, align 4
-  %call416 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp412, float* %ref.tmp413, float* %ref.tmp414, float* %ref.tmp415)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp411, float* %scale.addr, %class.btVector3* %ref.tmp412)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localA, %class.btVector3* %ref.tmp411)
-  %call417 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localB)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call417, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
-  store float 0.000000e+00, float* %ref.tmp420, align 4
-  store float 0xBFC70A3D80000000, float* %ref.tmp421, align 4
-  store float 0.000000e+00, float* %ref.tmp422, align 4
-  %call423 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp419, float* %ref.tmp420, float* %ref.tmp421, float* %ref.tmp422)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp418, float* %scale.addr, %class.btVector3* %ref.tmp419)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp418)
-  %call424 = call noalias i8* @_Znwm(i32 628)
-  %160 = bitcast i8* %call424 to %class.btConeTwistConstraint*
-  %m_bodies425 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx426 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies425, i32 0, i32 1
-  %161 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx426, align 4
-  %m_bodies427 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx428 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies427, i32 0, i32 7
-  %162 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx428, align 4
-  %call431 = invoke %class.btConeTwistConstraint* @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(%class.btConeTwistConstraint* %160, %class.btRigidBody* %161, %class.btRigidBody* %162, %class.btTransform* %localA, %class.btTransform* %localB)
+  store ptr %call395, ptr %hingeC, align 4
+  %113 = load ptr, ptr %hingeC, align 4
+  call void @_ZN17btHingeConstraint8setLimitEfffff(ptr %113, float 0.000000e+00, float 0x3FF921FB60000000, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
+  %114 = load ptr, ptr %hingeC, align 4
+  %m_joints403 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx404 = getelementptr inbounds [10 x ptr], ptr %m_joints403, i32 0, i32 5
+  store ptr %114, ptr %arrayidx404, align 4
+  %m_ownerWorld405 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 1
+  %115 = load ptr, ptr %m_ownerWorld405, align 4
+  %vtable406 = load ptr, ptr %115
+  %vfn407 = getelementptr inbounds ptr, ptr %vtable406, i64 10
+  %116 = load ptr, ptr %vfn407
+  %m_joints408 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx409 = getelementptr inbounds [10 x ptr], ptr %m_joints408, i32 0, i32 5
+  %117 = load ptr, ptr %arrayidx409, align 4
+  call void %116(ptr %115, ptr %117, i1 zeroext true)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localA)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localB)
+  %call410 = call ptr @_ZN11btTransform8getBasisEv(ptr %localA)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call410, float 0.000000e+00, float 0.000000e+00, float 0x400921FB60000000)
+  store float 0xBFC99999A0000000, ptr %ref.tmp413, align 4
+  store float 0x3FC3333340000000, ptr %ref.tmp414, align 4
+  store float 0.000000e+00, ptr %ref.tmp415, align 4
+  %call416 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp412, ptr %ref.tmp413, ptr %ref.tmp414, ptr %ref.tmp415)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp411, ptr %scale.addr, ptr %ref.tmp412)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localA, ptr %ref.tmp411)
+  %call417 = call ptr @_ZN11btTransform8getBasisEv(ptr %localB)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call417, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
+  store float 0.000000e+00, ptr %ref.tmp420, align 4
+  store float 0xBFC70A3D80000000, ptr %ref.tmp421, align 4
+  store float 0.000000e+00, ptr %ref.tmp422, align 4
+  %call423 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp419, ptr %ref.tmp420, ptr %ref.tmp421, ptr %ref.tmp422)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp418, ptr %scale.addr, ptr %ref.tmp419)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localB, ptr %ref.tmp418)
+  %call424 = call noalias ptr @_Znwm(i32 628)
+  %m_bodies425 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx426 = getelementptr inbounds [11 x ptr], ptr %m_bodies425, i32 0, i32 1
+  %118 = load ptr, ptr %arrayidx426, align 4
+  %m_bodies427 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx428 = getelementptr inbounds [11 x ptr], ptr %m_bodies427, i32 0, i32 7
+  %119 = load ptr, ptr %arrayidx428, align 4
+  %call431 = invoke ptr @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(ptr %call424, ptr %118, ptr %119, ptr %localA, ptr %localB)
           to label %invoke.cont430 unwind label %lpad429
 
 invoke.cont430:                                   ; preds = %invoke.cont401
-  store %class.btConeTwistConstraint* %160, %class.btConeTwistConstraint** %coneC, align 4
-  %163 = load %class.btConeTwistConstraint*, %class.btConeTwistConstraint** %coneC, align 4
-  call void @_ZN21btConeTwistConstraint8setLimitEffffff(%class.btConeTwistConstraint* %163, float 0x3FF921FB60000000, float 0x3FF921FB60000000, float 0.000000e+00, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
-  %164 = load %class.btConeTwistConstraint*, %class.btConeTwistConstraint** %coneC, align 4
-  %165 = bitcast %class.btConeTwistConstraint* %164 to %class.btTypedConstraint*
-  %m_joints432 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx433 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints432, i32 0, i32 6
-  store %class.btTypedConstraint* %165, %class.btTypedConstraint** %arrayidx433, align 4
-  %m_ownerWorld434 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
-  %166 = load %class.btDynamicsWorld*, %class.btDynamicsWorld** %m_ownerWorld434, align 4
-  %167 = bitcast %class.btDynamicsWorld* %166 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
-  %vtable435 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)**, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %167
-  %vfn436 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable435, i64 10
-  %168 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn436
-  %m_joints437 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx438 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints437, i32 0, i32 6
-  %169 = load %class.btTypedConstraint*, %class.btTypedConstraint** %arrayidx438, align 4
-  call void %168(%class.btDynamicsWorld* %166, %class.btTypedConstraint* %169, i1 zeroext true)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localB)
-  %call439 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localA)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call439, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
-  store float 0.000000e+00, float* %ref.tmp442, align 4
-  store float 0x3FC70A3D80000000, float* %ref.tmp443, align 4
-  store float 0.000000e+00, float* %ref.tmp444, align 4
-  %call445 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp441, float* %ref.tmp442, float* %ref.tmp443, float* %ref.tmp444)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp440, float* %scale.addr, %class.btVector3* %ref.tmp441)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localA, %class.btVector3* %ref.tmp440)
-  %call446 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localB)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call446, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
-  store float 0.000000e+00, float* %ref.tmp449, align 4
-  store float 0xBFC1EB8520000000, float* %ref.tmp450, align 4
-  store float 0.000000e+00, float* %ref.tmp451, align 4
-  %call452 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp448, float* %ref.tmp449, float* %ref.tmp450, float* %ref.tmp451)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp447, float* %scale.addr, %class.btVector3* %ref.tmp448)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp447)
-  %call453 = call noalias i8* @_Znwm(i32 780)
-  %170 = bitcast i8* %call453 to %class.btHingeConstraint*
-  %m_bodies454 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx455 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies454, i32 0, i32 7
-  %171 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx455, align 4
-  %m_bodies456 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx457 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies456, i32 0, i32 8
-  %172 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx457, align 4
-  %call460 = invoke %class.btHingeConstraint* @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(%class.btHingeConstraint* %170, %class.btRigidBody* %171, %class.btRigidBody* %172, %class.btTransform* %localA, %class.btTransform* %localB, i1 zeroext false)
+  store ptr %call424, ptr %coneC, align 4
+  %120 = load ptr, ptr %coneC, align 4
+  call void @_ZN21btConeTwistConstraint8setLimitEffffff(ptr %120, float 0x3FF921FB60000000, float 0x3FF921FB60000000, float 0.000000e+00, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
+  %121 = load ptr, ptr %coneC, align 4
+  %m_joints432 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx433 = getelementptr inbounds [10 x ptr], ptr %m_joints432, i32 0, i32 6
+  store ptr %121, ptr %arrayidx433, align 4
+  %m_ownerWorld434 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 1
+  %122 = load ptr, ptr %m_ownerWorld434, align 4
+  %vtable435 = load ptr, ptr %122
+  %vfn436 = getelementptr inbounds ptr, ptr %vtable435, i64 10
+  %123 = load ptr, ptr %vfn436
+  %m_joints437 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx438 = getelementptr inbounds [10 x ptr], ptr %m_joints437, i32 0, i32 6
+  %124 = load ptr, ptr %arrayidx438, align 4
+  call void %123(ptr %122, ptr %124, i1 zeroext true)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localA)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localB)
+  %call439 = call ptr @_ZN11btTransform8getBasisEv(ptr %localA)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call439, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
+  store float 0.000000e+00, ptr %ref.tmp442, align 4
+  store float 0x3FC70A3D80000000, ptr %ref.tmp443, align 4
+  store float 0.000000e+00, ptr %ref.tmp444, align 4
+  %call445 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp441, ptr %ref.tmp442, ptr %ref.tmp443, ptr %ref.tmp444)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp440, ptr %scale.addr, ptr %ref.tmp441)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localA, ptr %ref.tmp440)
+  %call446 = call ptr @_ZN11btTransform8getBasisEv(ptr %localB)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call446, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
+  store float 0.000000e+00, ptr %ref.tmp449, align 4
+  store float 0xBFC1EB8520000000, ptr %ref.tmp450, align 4
+  store float 0.000000e+00, ptr %ref.tmp451, align 4
+  %call452 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp448, ptr %ref.tmp449, ptr %ref.tmp450, ptr %ref.tmp451)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp447, ptr %scale.addr, ptr %ref.tmp448)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localB, ptr %ref.tmp447)
+  %call453 = call noalias ptr @_Znwm(i32 780)
+  %m_bodies454 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx455 = getelementptr inbounds [11 x ptr], ptr %m_bodies454, i32 0, i32 7
+  %125 = load ptr, ptr %arrayidx455, align 4
+  %m_bodies456 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx457 = getelementptr inbounds [11 x ptr], ptr %m_bodies456, i32 0, i32 8
+  %126 = load ptr, ptr %arrayidx457, align 4
+  %call460 = invoke ptr @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(ptr %call453, ptr %125, ptr %126, ptr %localA, ptr %localB, i1 zeroext false)
           to label %invoke.cont459 unwind label %lpad458
 
 invoke.cont459:                                   ; preds = %invoke.cont430
-  store %class.btHingeConstraint* %170, %class.btHingeConstraint** %hingeC, align 4
-  %173 = load %class.btHingeConstraint*, %class.btHingeConstraint** %hingeC, align 4
-  call void @_ZN17btHingeConstraint8setLimitEfffff(%class.btHingeConstraint* %173, float 0xBFF921FB60000000, float 0.000000e+00, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
-  %174 = load %class.btHingeConstraint*, %class.btHingeConstraint** %hingeC, align 4
-  %175 = bitcast %class.btHingeConstraint* %174 to %class.btTypedConstraint*
-  %m_joints461 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx462 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints461, i32 0, i32 7
-  store %class.btTypedConstraint* %175, %class.btTypedConstraint** %arrayidx462, align 4
-  %m_ownerWorld463 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
-  %176 = load %class.btDynamicsWorld*, %class.btDynamicsWorld** %m_ownerWorld463, align 4
-  %177 = bitcast %class.btDynamicsWorld* %176 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
-  %vtable464 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)**, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %177
-  %vfn465 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable464, i64 10
-  %178 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn465
-  %m_joints466 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx467 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints466, i32 0, i32 7
-  %179 = load %class.btTypedConstraint*, %class.btTypedConstraint** %arrayidx467, align 4
-  call void %178(%class.btDynamicsWorld* %176, %class.btTypedConstraint* %179, i1 zeroext true)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localB)
-  %call468 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localA)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call468, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00)
-  store float 0x3FC99999A0000000, float* %ref.tmp471, align 4
-  store float 0x3FC3333340000000, float* %ref.tmp472, align 4
-  store float 0.000000e+00, float* %ref.tmp473, align 4
-  %call474 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp470, float* %ref.tmp471, float* %ref.tmp472, float* %ref.tmp473)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp469, float* %scale.addr, %class.btVector3* %ref.tmp470)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localA, %class.btVector3* %ref.tmp469)
-  %call475 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localB)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call475, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
-  store float 0.000000e+00, float* %ref.tmp478, align 4
-  store float 0xBFC70A3D80000000, float* %ref.tmp479, align 4
-  store float 0.000000e+00, float* %ref.tmp480, align 4
-  %call481 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp477, float* %ref.tmp478, float* %ref.tmp479, float* %ref.tmp480)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp476, float* %scale.addr, %class.btVector3* %ref.tmp477)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp476)
-  %call482 = call noalias i8* @_Znwm(i32 628)
-  %180 = bitcast i8* %call482 to %class.btConeTwistConstraint*
-  %m_bodies483 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx484 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies483, i32 0, i32 1
-  %181 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx484, align 4
-  %m_bodies485 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx486 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies485, i32 0, i32 9
-  %182 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx486, align 4
-  %call489 = invoke %class.btConeTwistConstraint* @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(%class.btConeTwistConstraint* %180, %class.btRigidBody* %181, %class.btRigidBody* %182, %class.btTransform* %localA, %class.btTransform* %localB)
+  store ptr %call453, ptr %hingeC, align 4
+  %127 = load ptr, ptr %hingeC, align 4
+  call void @_ZN17btHingeConstraint8setLimitEfffff(ptr %127, float 0xBFF921FB60000000, float 0.000000e+00, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
+  %128 = load ptr, ptr %hingeC, align 4
+  %m_joints461 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx462 = getelementptr inbounds [10 x ptr], ptr %m_joints461, i32 0, i32 7
+  store ptr %128, ptr %arrayidx462, align 4
+  %m_ownerWorld463 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 1
+  %129 = load ptr, ptr %m_ownerWorld463, align 4
+  %vtable464 = load ptr, ptr %129
+  %vfn465 = getelementptr inbounds ptr, ptr %vtable464, i64 10
+  %130 = load ptr, ptr %vfn465
+  %m_joints466 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx467 = getelementptr inbounds [10 x ptr], ptr %m_joints466, i32 0, i32 7
+  %131 = load ptr, ptr %arrayidx467, align 4
+  call void %130(ptr %129, ptr %131, i1 zeroext true)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localA)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localB)
+  %call468 = call ptr @_ZN11btTransform8getBasisEv(ptr %localA)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call468, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00)
+  store float 0x3FC99999A0000000, ptr %ref.tmp471, align 4
+  store float 0x3FC3333340000000, ptr %ref.tmp472, align 4
+  store float 0.000000e+00, ptr %ref.tmp473, align 4
+  %call474 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp470, ptr %ref.tmp471, ptr %ref.tmp472, ptr %ref.tmp473)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp469, ptr %scale.addr, ptr %ref.tmp470)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localA, ptr %ref.tmp469)
+  %call475 = call ptr @_ZN11btTransform8getBasisEv(ptr %localB)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call475, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
+  store float 0.000000e+00, ptr %ref.tmp478, align 4
+  store float 0xBFC70A3D80000000, ptr %ref.tmp479, align 4
+  store float 0.000000e+00, ptr %ref.tmp480, align 4
+  %call481 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp477, ptr %ref.tmp478, ptr %ref.tmp479, ptr %ref.tmp480)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp476, ptr %scale.addr, ptr %ref.tmp477)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localB, ptr %ref.tmp476)
+  %call482 = call noalias ptr @_Znwm(i32 628)
+  %m_bodies483 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx484 = getelementptr inbounds [11 x ptr], ptr %m_bodies483, i32 0, i32 1
+  %132 = load ptr, ptr %arrayidx484, align 4
+  %m_bodies485 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx486 = getelementptr inbounds [11 x ptr], ptr %m_bodies485, i32 0, i32 9
+  %133 = load ptr, ptr %arrayidx486, align 4
+  %call489 = invoke ptr @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(ptr %call482, ptr %132, ptr %133, ptr %localA, ptr %localB)
           to label %invoke.cont488 unwind label %lpad487
 
 invoke.cont488:                                   ; preds = %invoke.cont459
-  store %class.btConeTwistConstraint* %180, %class.btConeTwistConstraint** %coneC, align 4
-  %183 = load %class.btConeTwistConstraint*, %class.btConeTwistConstraint** %coneC, align 4
-  call void @_ZN21btConeTwistConstraint8setLimitEffffff(%class.btConeTwistConstraint* %183, float 0x3FF921FB60000000, float 0x3FF921FB60000000, float 0.000000e+00, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
-  %184 = load %class.btConeTwistConstraint*, %class.btConeTwistConstraint** %coneC, align 4
-  %185 = bitcast %class.btConeTwistConstraint* %184 to %class.btTypedConstraint*
-  %m_joints490 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx491 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints490, i32 0, i32 8
-  store %class.btTypedConstraint* %185, %class.btTypedConstraint** %arrayidx491, align 4
-  %m_ownerWorld492 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
-  %186 = load %class.btDynamicsWorld*, %class.btDynamicsWorld** %m_ownerWorld492, align 4
-  %187 = bitcast %class.btDynamicsWorld* %186 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
-  %vtable493 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)**, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %187
-  %vfn494 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable493, i64 10
-  %188 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn494
-  %m_joints495 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx496 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints495, i32 0, i32 8
-  %189 = load %class.btTypedConstraint*, %class.btTypedConstraint** %arrayidx496, align 4
-  call void %188(%class.btDynamicsWorld* %186, %class.btTypedConstraint* %189, i1 zeroext true)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
-  call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localB)
-  %call497 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localA)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call497, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
-  store float 0.000000e+00, float* %ref.tmp500, align 4
-  store float 0x3FC70A3D80000000, float* %ref.tmp501, align 4
-  store float 0.000000e+00, float* %ref.tmp502, align 4
-  %call503 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp499, float* %ref.tmp500, float* %ref.tmp501, float* %ref.tmp502)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp498, float* %scale.addr, %class.btVector3* %ref.tmp499)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localA, %class.btVector3* %ref.tmp498)
-  %call504 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %localB)
-  call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call504, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
-  store float 0.000000e+00, float* %ref.tmp507, align 4
-  store float 0xBFC1EB8520000000, float* %ref.tmp508, align 4
-  store float 0.000000e+00, float* %ref.tmp509, align 4
-  %call510 = call %class.btVector3* @_ZN9btVector3C1ERKfS1_S1_(%class.btVector3* %ref.tmp506, float* %ref.tmp507, float* %ref.tmp508, float* %ref.tmp509)
-  call void @_ZmlRKfRK9btVector3(%class.btVector3* sret(%class.btVector3) %ref.tmp505, float* %scale.addr, %class.btVector3* %ref.tmp506)
-  call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp505)
-  %call511 = call noalias i8* @_Znwm(i32 780)
-  %190 = bitcast i8* %call511 to %class.btHingeConstraint*
-  %m_bodies512 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx513 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies512, i32 0, i32 9
-  %191 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx513, align 4
-  %m_bodies514 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
-  %arrayidx515 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies514, i32 0, i32 10
-  %192 = load %class.btRigidBody*, %class.btRigidBody** %arrayidx515, align 4
-  %call518 = invoke %class.btHingeConstraint* @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(%class.btHingeConstraint* %190, %class.btRigidBody* %191, %class.btRigidBody* %192, %class.btTransform* %localA, %class.btTransform* %localB, i1 zeroext false)
+  store ptr %call482, ptr %coneC, align 4
+  %134 = load ptr, ptr %coneC, align 4
+  call void @_ZN21btConeTwistConstraint8setLimitEffffff(ptr %134, float 0x3FF921FB60000000, float 0x3FF921FB60000000, float 0.000000e+00, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
+  %135 = load ptr, ptr %coneC, align 4
+  %m_joints490 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx491 = getelementptr inbounds [10 x ptr], ptr %m_joints490, i32 0, i32 8
+  store ptr %135, ptr %arrayidx491, align 4
+  %m_ownerWorld492 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 1
+  %136 = load ptr, ptr %m_ownerWorld492, align 4
+  %vtable493 = load ptr, ptr %136
+  %vfn494 = getelementptr inbounds ptr, ptr %vtable493, i64 10
+  %137 = load ptr, ptr %vfn494
+  %m_joints495 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx496 = getelementptr inbounds [10 x ptr], ptr %m_joints495, i32 0, i32 8
+  %138 = load ptr, ptr %arrayidx496, align 4
+  call void %137(ptr %136, ptr %138, i1 zeroext true)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localA)
+  call void @_ZN11btTransform11setIdentityEv(ptr %localB)
+  %call497 = call ptr @_ZN11btTransform8getBasisEv(ptr %localA)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call497, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
+  store float 0.000000e+00, ptr %ref.tmp500, align 4
+  store float 0x3FC70A3D80000000, ptr %ref.tmp501, align 4
+  store float 0.000000e+00, ptr %ref.tmp502, align 4
+  %call503 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp499, ptr %ref.tmp500, ptr %ref.tmp501, ptr %ref.tmp502)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp498, ptr %scale.addr, ptr %ref.tmp499)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localA, ptr %ref.tmp498)
+  %call504 = call ptr @_ZN11btTransform8getBasisEv(ptr %localB)
+  call void @_ZN11btMatrix3x311setEulerZYXEfff(ptr %call504, float 0.000000e+00, float 0x3FF921FB60000000, float 0.000000e+00)
+  store float 0.000000e+00, ptr %ref.tmp507, align 4
+  store float 0xBFC1EB8520000000, ptr %ref.tmp508, align 4
+  store float 0.000000e+00, ptr %ref.tmp509, align 4
+  %call510 = call ptr @_ZN9btVector3C1ERKfS1_S1_(ptr %ref.tmp506, ptr %ref.tmp507, ptr %ref.tmp508, ptr %ref.tmp509)
+  call void @_ZmlRKfRK9btVector3(ptr sret(%class.btVector3) %ref.tmp505, ptr %scale.addr, ptr %ref.tmp506)
+  call void @_ZN11btTransform9setOriginERK9btVector3(ptr %localB, ptr %ref.tmp505)
+  %call511 = call noalias ptr @_Znwm(i32 780)
+  %m_bodies512 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx513 = getelementptr inbounds [11 x ptr], ptr %m_bodies512, i32 0, i32 9
+  %139 = load ptr, ptr %arrayidx513, align 4
+  %m_bodies514 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 3
+  %arrayidx515 = getelementptr inbounds [11 x ptr], ptr %m_bodies514, i32 0, i32 10
+  %140 = load ptr, ptr %arrayidx515, align 4
+  %call518 = invoke ptr @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(ptr %call511, ptr %139, ptr %140, ptr %localA, ptr %localB, i1 zeroext false)
           to label %invoke.cont517 unwind label %lpad516
 
 invoke.cont517:                                   ; preds = %invoke.cont488
-  store %class.btHingeConstraint* %190, %class.btHingeConstraint** %hingeC, align 4
-  %193 = load %class.btHingeConstraint*, %class.btHingeConstraint** %hingeC, align 4
-  call void @_ZN17btHingeConstraint8setLimitEfffff(%class.btHingeConstraint* %193, float 0xBFF921FB60000000, float 0.000000e+00, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
-  %194 = load %class.btHingeConstraint*, %class.btHingeConstraint** %hingeC, align 4
-  %195 = bitcast %class.btHingeConstraint* %194 to %class.btTypedConstraint*
-  %m_joints519 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx520 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints519, i32 0, i32 9
-  store %class.btTypedConstraint* %195, %class.btTypedConstraint** %arrayidx520, align 4
-  %m_ownerWorld521 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
-  %196 = load %class.btDynamicsWorld*, %class.btDynamicsWorld** %m_ownerWorld521, align 4
-  %197 = bitcast %class.btDynamicsWorld* %196 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
-  %vtable522 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)**, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %197
-  %vfn523 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable522, i64 10
-  %198 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn523
-  %m_joints524 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
-  %arrayidx525 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints524, i32 0, i32 9
-  %199 = load %class.btTypedConstraint*, %class.btTypedConstraint** %arrayidx525, align 4
-  call void %198(%class.btDynamicsWorld* %196, %class.btTypedConstraint* %199, i1 zeroext true)
-  %200 = load %class.RagDoll*, %class.RagDoll** %retval
-  ret %class.RagDoll* %200
+  store ptr %call511, ptr %hingeC, align 4
+  %141 = load ptr, ptr %hingeC, align 4
+  call void @_ZN17btHingeConstraint8setLimitEfffff(ptr %141, float 0xBFF921FB60000000, float 0.000000e+00, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
+  %142 = load ptr, ptr %hingeC, align 4
+  %m_joints519 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx520 = getelementptr inbounds [10 x ptr], ptr %m_joints519, i32 0, i32 9
+  store ptr %142, ptr %arrayidx520, align 4
+  %m_ownerWorld521 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 1
+  %143 = load ptr, ptr %m_ownerWorld521, align 4
+  %vtable522 = load ptr, ptr %143
+  %vfn523 = getelementptr inbounds ptr, ptr %vtable522, i64 10
+  %144 = load ptr, ptr %vfn523
+  %m_joints524 = getelementptr inbounds %class.RagDoll, ptr %this1, i32 0, i32 4
+  %arrayidx525 = getelementptr inbounds [10 x ptr], ptr %m_joints524, i32 0, i32 9
+  %145 = load ptr, ptr %arrayidx525, align 4
+  call void %144(ptr %143, ptr %145, i1 zeroext true)
+  %146 = load ptr, ptr %retval
+  ret ptr %146
 
 lpad258:                                          ; preds = %for.end
-  %201 = landingpad { i8*, i32 }
+  %147 = landingpad { ptr, i32 }
           cleanup
-  %202 = extractvalue { i8*, i32 } %201, 0
-  store i8* %202, i8** %exn.slot
-  %203 = extractvalue { i8*, i32 } %201, 1
-  store i32 %203, i32* %ehselector.slot
-  call void @_ZdlPv(i8* %call253) nounwind
+  %148 = extractvalue { ptr, i32 } %147, 0
+  store ptr %148, ptr %exn.slot
+  %149 = extractvalue { ptr, i32 } %147, 1
+  store i32 %149, ptr %ehselector.slot
+  call void @_ZdlPv(ptr %call253) nounwind
   br label %eh.resume
 
 lpad284:                                          ; preds = %invoke.cont259
-  %204 = landingpad { i8*, i32 }
+  %150 = landingpad { ptr, i32 }
           cleanup
-  %205 = extractvalue { i8*, i32 } %204, 0
-  store i8* %205, i8** %exn.slot
-  %206 = extractvalue { i8*, i32 } %204, 1
-  store i32 %206, i32* %ehselector.slot
-  call void @_ZdlPv(i8* %call279) nounwind
+  %151 = extractvalue { ptr, i32 } %150, 0
+  store ptr %151, ptr %exn.slot
+  %152 = extractvalue { ptr, i32 } %150, 1
+  store i32 %152, ptr %ehselector.slot
+  call void @_ZdlPv(ptr %call279) nounwind
   br label %eh.resume
 
 lpad313:                                          ; preds = %invoke.cont285
-  %207 = landingpad { i8*, i32 }
+  %153 = landingpad { ptr, i32 }
           cleanup
-  %208 = extractvalue { i8*, i32 } %207, 0
-  store i8* %208, i8** %exn.slot
-  %209 = extractvalue { i8*, i32 } %207, 1
-  store i32 %209, i32* %ehselector.slot
-  call void @_ZdlPv(i8* %call308) nounwind
+  %154 = extractvalue { ptr, i32 } %153, 0
+  store ptr %154, ptr %exn.slot
+  %155 = extractvalue { ptr, i32 } %153, 1
+  store i32 %155, ptr %ehselector.slot
+  call void @_ZdlPv(ptr %call308) nounwind
   br label %eh.resume
 
 lpad342:                                          ; preds = %invoke.cont314
-  %210 = landingpad { i8*, i32 }
+  %156 = landingpad { ptr, i32 }
           cleanup
-  %211 = extractvalue { i8*, i32 } %210, 0
-  store i8* %211, i8** %exn.slot
-  %212 = extractvalue { i8*, i32 } %210, 1
-  store i32 %212, i32* %ehselector.slot
-  call void @_ZdlPv(i8* %call337) nounwind
+  %157 = extractvalue { ptr, i32 } %156, 0
+  store ptr %157, ptr %exn.slot
+  %158 = extractvalue { ptr, i32 } %156, 1
+  store i32 %158, ptr %ehselector.slot
+  call void @_ZdlPv(ptr %call337) nounwind
   br label %eh.resume
 
 lpad371:                                          ; preds = %invoke.cont343
-  %213 = landingpad { i8*, i32 }
+  %159 = landingpad { ptr, i32 }
           cleanup
-  %214 = extractvalue { i8*, i32 } %213, 0
-  store i8* %214, i8** %exn.slot
-  %215 = extractvalue { i8*, i32 } %213, 1
-  store i32 %215, i32* %ehselector.slot
-  call void @_ZdlPv(i8* %call366) nounwind
+  %160 = extractvalue { ptr, i32 } %159, 0
+  store ptr %160, ptr %exn.slot
+  %161 = extractvalue { ptr, i32 } %159, 1
+  store i32 %161, ptr %ehselector.slot
+  call void @_ZdlPv(ptr %call366) nounwind
   br label %eh.resume
 
 lpad400:                                          ; preds = %invoke.cont372
-  %216 = landingpad { i8*, i32 }
+  %162 = landingpad { ptr, i32 }
           cleanup
-  %217 = extractvalue { i8*, i32 } %216, 0
-  store i8* %217, i8** %exn.slot
-  %218 = extractvalue { i8*, i32 } %216, 1
-  store i32 %218, i32* %ehselector.slot
-  call void @_ZdlPv(i8* %call395) nounwind
+  %163 = extractvalue { ptr, i32 } %162, 0
+  store ptr %163, ptr %exn.slot
+  %164 = extractvalue { ptr, i32 } %162, 1
+  store i32 %164, ptr %ehselector.slot
+  call void @_ZdlPv(ptr %call395) nounwind
   br label %eh.resume
 
 lpad429:                                          ; preds = %invoke.cont401
-  %219 = landingpad { i8*, i32 }
+  %165 = landingpad { ptr, i32 }
           cleanup
-  %220 = extractvalue { i8*, i32 } %219, 0
-  store i8* %220, i8** %exn.slot
-  %221 = extractvalue { i8*, i32 } %219, 1
-  store i32 %221, i32* %ehselector.slot
-  call void @_ZdlPv(i8* %call424) nounwind
+  %166 = extractvalue { ptr, i32 } %165, 0
+  store ptr %166, ptr %exn.slot
+  %167 = extractvalue { ptr, i32 } %165, 1
+  store i32 %167, ptr %ehselector.slot
+  call void @_ZdlPv(ptr %call424) nounwind
   br label %eh.resume
 
 lpad458:                                          ; preds = %invoke.cont430
-  %222 = landingpad { i8*, i32 }
+  %168 = landingpad { ptr, i32 }
           cleanup
-  %223 = extractvalue { i8*, i32 } %222, 0
-  store i8* %223, i8** %exn.slot
-  %224 = extractvalue { i8*, i32 } %222, 1
-  store i32 %224, i32* %ehselector.slot
-  call void @_ZdlPv(i8* %call453) nounwind
+  %169 = extractvalue { ptr, i32 } %168, 0
+  store ptr %169, ptr %exn.slot
+  %170 = extractvalue { ptr, i32 } %168, 1
+  store i32 %170, ptr %ehselector.slot
+  call void @_ZdlPv(ptr %call453) nounwind
   br label %eh.resume
 
 lpad487:                                          ; preds = %invoke.cont459
-  %225 = landingpad { i8*, i32 }
+  %171 = landingpad { ptr, i32 }
           cleanup
-  %226 = extractvalue { i8*, i32 } %225, 0
-  store i8* %226, i8** %exn.slot
-  %227 = extractvalue { i8*, i32 } %225, 1
-  store i32 %227, i32* %ehselector.slot
-  call void @_ZdlPv(i8* %call482) nounwind
+  %172 = extractvalue { ptr, i32 } %171, 0
+  store ptr %172, ptr %exn.slot
+  %173 = extractvalue { ptr, i32 } %171, 1
+  store i32 %173, ptr %ehselector.slot
+  call void @_ZdlPv(ptr %call482) nounwind
   br label %eh.resume
 
 lpad516:                                          ; preds = %invoke.cont488
-  %228 = landingpad { i8*, i32 }
+  %174 = landingpad { ptr, i32 }
           cleanup
-  %229 = extractvalue { i8*, i32 } %228, 0
-  store i8* %229, i8** %exn.slot
-  %230 = extractvalue { i8*, i32 } %228, 1
-  store i32 %230, i32* %ehselector.slot
-  call void @_ZdlPv(i8* %call511) nounwind
+  %175 = extractvalue { ptr, i32 } %174, 0
+  store ptr %175, ptr %exn.slot
+  %176 = extractvalue { ptr, i32 } %174, 1
+  store i32 %176, ptr %ehselector.slot
+  call void @_ZdlPv(ptr %call511) nounwind
   br label %eh.resume
 
 eh.resume:                                        ; preds = %lpad516, %lpad487, %lpad458, %lpad429, %lpad400, %lpad371, %lpad342, %lpad313, %lpad284, %lpad258, %invoke.cont92, %invoke.cont83, %invoke.cont74, %invoke.cont65, %invoke.cont56, %invoke.cont47, %invoke.cont38, %invoke.cont29, %invoke.cont20, %invoke.cont11, %invoke.cont4
-  %exn = load i8*, i8** %exn.slot
-  %sel = load i32, i32* %ehselector.slot
-  %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn, 0
-  %lpad.val526 = insertvalue { i8*, i32 } %lpad.val, i32 %sel, 1
-  resume { i8*, i32 } %lpad.val526
+  %exn = load ptr, ptr %exn.slot
+  %sel = load i32, ptr %ehselector.slot
+  %lpad.val = insertvalue { ptr, i32 } undef, ptr %exn, 0
+  %lpad.val526 = insertvalue { ptr, i32 } %lpad.val, i32 %sel, 1
+  resume { ptr, i32 } %lpad.val526
 
 terminate.lpad:                                   ; preds = %lpad89, %lpad80, %lpad71, %lpad62, %lpad53, %lpad44, %lpad35, %lpad26, %lpad17, %lpad8, %lpad
-  %231 = landingpad { i8*, i32 }
-          catch i8* null
+  %177 = landingpad { ptr, i32 }
+          catch ptr null
   call void @_ZSt9terminatev() noreturn nounwind
   unreachable
 }
 
-declare void @_ZmlRKfRK9btVector3(%class.btVector3* noalias sret(%class.btVector3), float*, %class.btVector3*) inlinehint ssp
+declare void @_ZmlRKfRK9btVector3(ptr noalias sret(%class.btVector3), ptr, ptr) inlinehint ssp
 
-declare %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll*, float, %class.btTransform*, %class.btCollisionShape*) ssp align 2
+declare ptr @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(ptr, float, ptr, ptr) ssp align 2
 
-declare void @_ZNK11btTransformmlERKS_(%class.btTransform* noalias sret(%class.btTransform), %class.btTransform*, %class.btTransform*) inlinehint ssp align 2
+declare void @_ZNK11btTransformmlERKS_(ptr noalias sret(%class.btTransform), ptr, ptr) inlinehint ssp align 2
 
-declare void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3*, float, float, float) ssp align 2
+declare void @_ZN11btMatrix3x311setEulerZYXEfff(ptr, float, float, float) ssp align 2
 
-declare void @_ZN11btRigidBody10setDampingEff(%class.btRigidBody*, float, float)
+declare void @_ZN11btRigidBody10setDampingEff(ptr, float, float)
 
-declare void @_ZN17btCollisionObject19setDeactivationTimeEf(%class.btCollisionObject*, float) nounwind ssp align 2
+declare void @_ZN17btCollisionObject19setDeactivationTimeEf(ptr, float) nounwind ssp align 2
 
-declare void @_ZN11btRigidBody21setSleepingThresholdsEff(%class.btRigidBody*, float, float) nounwind ssp align 2
+declare void @_ZN11btRigidBody21setSleepingThresholdsEff(ptr, float, float) nounwind ssp align 2
 
-declare %class.btHingeConstraint* @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(%class.btHingeConstraint*, %class.btRigidBody*, %class.btRigidBody*, %class.btTransform*, %class.btTransform*, i1 zeroext)
+declare ptr @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(ptr, ptr, ptr, ptr, ptr, i1 zeroext)
 
-declare void @_ZN17btHingeConstraint8setLimitEfffff(%class.btHingeConstraint*, float, float, float, float, float) ssp align 2
+declare void @_ZN17btHingeConstraint8setLimitEfffff(ptr, float, float, float, float, float) ssp align 2
 
-declare %class.btConeTwistConstraint* @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(%class.btConeTwistConstraint*, %class.btRigidBody*, %class.btRigidBody*, %class.btTransform*, %class.btTransform*)
+declare ptr @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(ptr, ptr, ptr, ptr, ptr)
 
-declare void @_ZN21btConeTwistConstraint8setLimitEffffff(%class.btConeTwistConstraint*, float, float, float, float, float, float) nounwind ssp align 2
+declare void @_ZN21btConeTwistConstraint8setLimitEffffff(ptr, float, float, float, float, float, float) nounwind ssp align 2

diff  --git a/llvm/test/CodeGen/Thumb2/crash.ll b/llvm/test/CodeGen/Thumb2/crash.ll
index fb32a2cac3a12..6a40a1001f2cb 100644
--- a/llvm/test/CodeGen/Thumb2/crash.ll
+++ b/llvm/test/CodeGen/Thumb2/crash.ll
@@ -4,25 +4,20 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-
 target triple = "thumbv7-apple-darwin10"
 
 ; This function would crash LiveIntervalAnalysis by creating a chain of 4 INSERT_SUBREGs of the same register.
-define arm_apcscc void @NEON_vst4q_u32(i32* nocapture %sp0, i32* nocapture %sp1, i32* nocapture %sp2, i32* nocapture %sp3, i32* %dp) nounwind {
+define arm_apcscc void @NEON_vst4q_u32(ptr nocapture %sp0, ptr nocapture %sp1, ptr nocapture %sp2, ptr nocapture %sp3, ptr %dp) nounwind {
 entry:
-  %0 = bitcast i32* %sp0 to <4 x i32>*            ; <<4 x i32>*> [#uses=1]
-  %1 = load <4 x i32>, <4 x i32>* %0, align 16               ; <<4 x i32>> [#uses=1]
-  %2 = bitcast i32* %sp1 to <4 x i32>*            ; <<4 x i32>*> [#uses=1]
-  %3 = load <4 x i32>, <4 x i32>* %2, align 16               ; <<4 x i32>> [#uses=1]
-  %4 = bitcast i32* %sp2 to <4 x i32>*            ; <<4 x i32>*> [#uses=1]
-  %5 = load <4 x i32>, <4 x i32>* %4, align 16               ; <<4 x i32>> [#uses=1]
-  %6 = bitcast i32* %sp3 to <4 x i32>*            ; <<4 x i32>*> [#uses=1]
-  %7 = load <4 x i32>, <4 x i32>* %6, align 16               ; <<4 x i32>> [#uses=1]
-  %8 = bitcast i32* %dp to i8*                    ; <i8*> [#uses=1]
-  tail call void @llvm.arm.neon.vst4.p0i8.v4i32(i8* %8, <4 x i32> %1, <4 x i32> %3, <4 x i32> %5, <4 x i32> %7, i32 1)
+  %0 = load <4 x i32>, ptr %sp0, align 16               ; <<4 x i32>> [#uses=1]
+  %1 = load <4 x i32>, ptr %sp1, align 16               ; <<4 x i32>> [#uses=1]
+  %2 = load <4 x i32>, ptr %sp2, align 16               ; <<4 x i32>> [#uses=1]
+  %3 = load <4 x i32>, ptr %sp3, align 16               ; <<4 x i32>> [#uses=1]
+  tail call void @llvm.arm.neon.vst4.p0.v4i32(ptr %dp, <4 x i32> %0, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3, i32 1)
   ret void
 }
 
-declare void @llvm.arm.neon.vst4.p0i8.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst4.p0.v4i32(ptr, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
 
-@sbuf = common global [16 x i32] zeroinitializer, align 16 ; <[16 x i32]*> [#uses=5]
-@dbuf = common global [16 x i32] zeroinitializer  ; <[16 x i32]*> [#uses=2]
+@sbuf = common global [16 x i32] zeroinitializer, align 16 ; <ptr> [#uses=5]
+@dbuf = common global [16 x i32] zeroinitializer  ; <ptr> [#uses=2]
 
 ; This function creates 4 chained INSERT_SUBREGS and then invokes the register scavenger.
 ; The first INSERT_SUBREG needs an <undef> use operand for that to work.
@@ -32,56 +27,56 @@ bb.nph:
 
 bb:                                               ; preds = %bb, %bb.nph
   %0 = phi i32 [ 0, %bb.nph ], [ %1, %bb ]        ; <i32> [#uses=4]
-  %scevgep = getelementptr [16 x i32], [16 x i32]* @sbuf, i32 0, i32 %0 ; <i32*> [#uses=1]
-  %scevgep5 = getelementptr [16 x i32], [16 x i32]* @dbuf, i32 0, i32 %0 ; <i32*> [#uses=1]
-  store i32 %0, i32* %scevgep, align 4
-  store i32 -1, i32* %scevgep5, align 4
+  %scevgep = getelementptr [16 x i32], ptr @sbuf, i32 0, i32 %0 ; <ptr> [#uses=1]
+  %scevgep5 = getelementptr [16 x i32], ptr @dbuf, i32 0, i32 %0 ; <ptr> [#uses=1]
+  store i32 %0, ptr %scevgep, align 4
+  store i32 -1, ptr %scevgep5, align 4
   %1 = add nsw i32 %0, 1                          ; <i32> [#uses=2]
   %exitcond = icmp eq i32 %1, 16                  ; <i1> [#uses=1]
   br i1 %exitcond, label %bb2, label %bb
 
 bb2:                                              ; preds = %bb
-  %2 = load <4 x i32>, <4 x i32>* bitcast ([16 x i32]* @sbuf to <4 x i32>*), align 16 ; <<4 x i32>> [#uses=1]
-  %3 = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @sbuf, i32 0, i32 4) to <4 x i32>*), align 16 ; <<4 x i32>> [#uses=1]
-  %4 = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @sbuf, i32 0, i32 8) to <4 x i32>*), align 16 ; <<4 x i32>> [#uses=1]
-  %5 = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32], [16 x i32]* @sbuf, i32 0, i32 12) to <4 x i32>*), align 16 ; <<4 x i32>> [#uses=1]
-  tail call void @llvm.arm.neon.vst4.p0i8.v4i32(i8* bitcast ([16 x i32]* @dbuf to i8*), <4 x i32> %2, <4 x i32> %3, <4 x i32> %4, <4 x i32> %5, i32 1) nounwind
+  %2 = load <4 x i32>, ptr @sbuf, align 16 ; <<4 x i32>> [#uses=1]
+  %3 = load <4 x i32>, ptr getelementptr inbounds ([16 x i32], ptr @sbuf, i32 0, i32 4), align 16 ; <<4 x i32>> [#uses=1]
+  %4 = load <4 x i32>, ptr getelementptr inbounds ([16 x i32], ptr @sbuf, i32 0, i32 8), align 16 ; <<4 x i32>> [#uses=1]
+  %5 = load <4 x i32>, ptr getelementptr inbounds ([16 x i32], ptr @sbuf, i32 0, i32 12), align 16 ; <<4 x i32>> [#uses=1]
+  tail call void @llvm.arm.neon.vst4.p0.v4i32(ptr @dbuf, <4 x i32> %2, <4 x i32> %3, <4 x i32> %4, <4 x i32> %5, i32 1) nounwind
   ret i32 0
 }
 
 ; PR12389
 ; Make sure the DPair register class can spill.
-define void @pr12389(i8* %p) nounwind ssp {
+define void @pr12389(ptr %p) nounwind ssp {
 entry:
-  %vld1 = tail call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* %p, i32 1)
+  %vld1 = tail call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr %p, i32 1)
   tail call void asm sideeffect "", "~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15}"() nounwind
-  tail call void @llvm.arm.neon.vst1.p0i8.v4f32(i8* %p, <4 x float> %vld1, i32 1)
+  tail call void @llvm.arm.neon.vst1.p0.v4f32(ptr %p, <4 x float> %vld1, i32 1)
   ret void
 }
 
-declare <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8*, i32) nounwind readonly
+declare <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr, i32) nounwind readonly
 
-declare void @llvm.arm.neon.vst1.p0i8.v4f32(i8*, <4 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst1.p0.v4f32(ptr, <4 x float>, i32) nounwind
 
 ; <rdar://problem/11101911>
 ; When an strd is expanded into two str instructions, make sure the first str
 ; doesn't kill the base register. This can happen if the base register is the
 ; same as the data register.
-%class = type { i8*, %class*, i32 }
-define void @f11101911(%class* %this, i32 %num) ssp align 2 {
+%class = type { ptr, ptr, i32 }
+define void @f11101911(ptr %this, i32 %num) ssp align 2 {
 entry:
-  %p1 = getelementptr inbounds %class, %class* %this, i32 0, i32 1
-  %p2 = getelementptr inbounds %class, %class* %this, i32 0, i32 2
+  %p1 = getelementptr inbounds %class, ptr %this, i32 0, i32 1
+  %p2 = getelementptr inbounds %class, ptr %this, i32 0, i32 2
   tail call void asm sideeffect "", "~{r1},~{r3},~{r5},~{r11},~{r13}"() nounwind
-  store %class* %this, %class** %p1, align 4
-  store i32 %num, i32* %p2, align 4
+  store ptr %this, ptr %p1, align 4
+  store i32 %num, ptr %p2, align 4
   ret void
 }
 
 ; Check RAFast handling of inline assembly with many dense clobbers.
 ; The large tuple aliases of the vector registers can cause problems.
-define void @rdar13249625(double* nocapture %p) nounwind {
+define void @rdar13249625(ptr nocapture %p) nounwind {
   %1 = tail call double asm sideeffect "@ $0", "=w,~{d0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15}"() nounwind
-  store double %1, double* %p, align 4
+  store double %1, ptr %p, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-1.ll b/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-1.ll
index c71c3ca576288..299132b93332e 100644
--- a/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-1.ll
+++ b/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-1.ll
@@ -1,12 +1,12 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8
 
-%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
+%struct.FILE = type { ptr, i32, i32, i16, i16, %struct.__sbuf, i32, ptr, ptr, ptr, ptr, ptr, %struct.__sbuf, ptr, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
 %struct.__sFILEX = type opaque
-%struct.__sbuf = type { i8*, i32 }
+%struct.__sbuf = type { ptr, i32 }
 
-declare i32 @fgetc(%struct.FILE* nocapture) nounwind
+declare i32 @fgetc(ptr nocapture) nounwind
 
-define i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
+define i32 @main(i32 %argc, ptr nocapture %argv) nounwind {
 entry:
   br i1 undef, label %bb, label %bb1
 
@@ -20,7 +20,7 @@ bb.i1:                                            ; preds = %bb1
   unreachable
 
 bb1.i2:                                           ; preds = %bb1
-  %0 = call  i32 @fgetc(%struct.FILE* undef) nounwind ; <i32> [#uses=0]
+  %0 = call  i32 @fgetc(ptr undef) nounwind ; <i32> [#uses=0]
   br i1 undef, label %bb2.i3, label %bb3.i4
 
 bb2.i3:                                           ; preds = %bb1.i2
@@ -44,7 +44,7 @@ bb27:                                             ; preds = %get_image.exit
 bb.i:                                             ; preds = %bb.i, %bb27
   %1 = fptrunc double undef to float              ; <float> [#uses=1]
   %2 = fptoui float %1 to i8                      ; <i8> [#uses=1]
-  store i8 %2, i8* undef, align 1
+  store i8 %2, ptr undef, align 1
   br label %bb.i
 
 bb28:                                             ; preds = %get_image.exit

diff  --git a/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll b/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
index b9ec0b43f551d..aa75e35731eca 100644
--- a/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
+++ b/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
@@ -1,6 +1,6 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 | FileCheck %s
 
-define void @fht(float* nocapture %fz, i16 signext %n) nounwind {
+define void @fht(ptr nocapture %fz, i16 signext %n) nounwind {
 ; CHECK-LABEL: fht:
 entry:
   br label %bb5
@@ -26,27 +26,27 @@ bb8:                                              ; preds = %bb8, %bb7
 ; CHECK-NOT: vmov.f32
 ; CHECK: blt
   %tmp54 = add i32 0, %tmp53                      ; <i32> [#uses=0]
-  %fi.1 = getelementptr float, float* %fz, i32 undef     ; <float*> [#uses=2]
+  %fi.1 = getelementptr float, ptr %fz, i32 undef     ; <ptr> [#uses=2]
   %tmp80 = add i32 0, %tmp79                      ; <i32> [#uses=1]
-  %scevgep81 = getelementptr float, float* %fz, i32 %tmp80 ; <float*> [#uses=1]
-  %2 = load float, float* undef, align 4                 ; <float> [#uses=1]
+  %scevgep81 = getelementptr float, ptr %fz, i32 %tmp80 ; <ptr> [#uses=1]
+  %2 = load float, ptr undef, align 4                 ; <float> [#uses=1]
   %3 = fmul float %2, %1                          ; <float> [#uses=1]
-  %4 = load float, float* null, align 4                  ; <float> [#uses=2]
+  %4 = load float, ptr null, align 4                  ; <float> [#uses=2]
   %5 = fmul float %4, %0                          ; <float> [#uses=1]
   %6 = fsub float %3, %5                          ; <float> [#uses=1]
   %7 = fmul float %4, %1                          ; <float> [#uses=1]
   %8 = fadd float %7, %7
-  %9 = load float, float* %fi.1, align 4                 ; <float> [#uses=2]
+  %9 = load float, ptr %fi.1, align 4                 ; <float> [#uses=2]
   %10 = fsub float %9, %8                         ; <float> [#uses=1]
   %11 = fadd float %9, %8                         ; <float> [#uses=1]
   %12 = fsub float 0.000000e+00, %6               ; <float> [#uses=1]
   %13 = fsub float 0.000000e+00, %12
   %14 = fmul float %0, %0
   %15 = fadd float %14, %14
-  %16 = load float, float* %scevgep81, align 4           ; <float> [#uses=2]
+  %16 = load float, ptr %scevgep81, align 4           ; <float> [#uses=2]
   %17 = fsub float %16, %15                       ; <float> [#uses=1]
   %18 = fadd float %16, %15                       ; <float> [#uses=2]
-  %19 = load float, float* undef, align 4                ; <float> [#uses=2]
+  %19 = load float, ptr undef, align 4                ; <float> [#uses=2]
   %20 = fsub float %19, %13                       ; <float> [#uses=2]
   %21 = fadd float %19, %13                       ; <float> [#uses=1]
   %22 = fmul float %s1.02, %18                    ; <float> [#uses=1]
@@ -56,14 +56,14 @@ bb8:                                              ; preds = %bb8, %bb7
   %26 = fmul float %s1.02, %20                    ; <float> [#uses=1]
   %27 = fadd float %25, %26                       ; <float> [#uses=1]
   %28 = fadd float %11, %27                       ; <float> [#uses=1]
-  store float %28, float* %fi.1, align 4
+  store float %28, ptr %fi.1, align 4
   %29 = fadd float %12, %24                       ; <float> [#uses=1]
-  store float %29, float* null, align 4
+  store float %29, ptr null, align 4
   %30 = fmul float 0.000000e+00, %21              ; <float> [#uses=1]
   %31 = fmul float %s1.02, %17                    ; <float> [#uses=1]
   %32 = fsub float %30, %31                       ; <float> [#uses=1]
   %33 = fsub float %10, %32                       ; <float> [#uses=1]
-  store float %33, float* undef, align 4
+  store float %33, ptr undef, align 4
   %34 = icmp slt i32 %tmp54, %tmp80               ; <i1> [#uses=1]
   br i1 %34, label %bb8, label %bb9
 

diff  --git a/llvm/test/CodeGen/Thumb2/emit-unwinding.ll b/llvm/test/CodeGen/Thumb2/emit-unwinding.ll
index 13d509acd13cf..1931343de2457 100644
--- a/llvm/test/CodeGen/Thumb2/emit-unwinding.ll
+++ b/llvm/test/CodeGen/Thumb2/emit-unwinding.ll
@@ -21,6 +21,5 @@ declare void @foo2()
 
 define void @bar() {
   %a1 = alloca [3800 x i8], align 4
-  %p = getelementptr inbounds [3800 x i8], [3800 x i8]* %a1, i32 0, i32 0
   ret void
 }

diff  --git a/llvm/test/CodeGen/Thumb2/fir.ll b/llvm/test/CodeGen/Thumb2/fir.ll
index 8133f2b45f654..918993f1fb933 100644
--- a/llvm/test/CodeGen/Thumb2/fir.ll
+++ b/llvm/test/CodeGen/Thumb2/fir.ll
@@ -2,7 +2,7 @@
 ; RUN: llc --verify-machineinstrs -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve %s -o - | FileCheck %s -check-prefix=CHECK
 ; RUN: llc --verify-machineinstrs -mtriple=thumbv8.1m.main-none-eabi -mattr=+dsp %s -o - | FileCheck %s -check-prefix=CHECK
 
-define void @test1(i32* %p0, i32 *%p1, i32 *%p2, i32 *%pDst) {
+define void @test1(ptr %p0, ptr %p1, ptr %p2, ptr %pDst) {
 ; CHECK-LABEL: test1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r1, [r1]
@@ -13,21 +13,21 @@ define void @test1(i32* %p0, i32 *%p1, i32 *%p2, i32 *%pDst) {
 ; CHECK-NEXT:    str r0, [r3]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l3 = load i32, i32* %p0, align 4
-  %l4 = load i32, i32* %p1, align 4
+  %l3 = load i32, ptr %p0, align 4
+  %l4 = load i32, ptr %p1, align 4
   %conv5.us = sext i32 %l4 to i64
-  %l5 = load i32, i32* %p2, align 4
+  %l5 = load i32, ptr %p2, align 4
   %conv6.us = sext i32 %l5 to i64
   %mul.us = mul nsw i64 %conv6.us, %conv5.us
   %l6 = lshr i64 %mul.us, 31
   %l7 = trunc i64 %l6 to i32
   %shl.us = and i32 %l7, -2
   %add.us = add nsw i32 %shl.us, %l3
-  store i32 %add.us, i32* %pDst, align 4
+  store i32 %add.us, ptr %pDst, align 4
   ret void
 }
 
-define void @test2(i32* %p0, i32 *%p1, i32 *%p2, i32 *%pDst) {
+define void @test2(ptr %p0, ptr %p1, ptr %p2, ptr %pDst) {
 ; CHECK-LABEL: test2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r1, [r1]
@@ -38,16 +38,16 @@ define void @test2(i32* %p0, i32 *%p1, i32 *%p2, i32 *%pDst) {
 ; CHECK-NEXT:    str r0, [r3]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l3 = load i32, i32* %p0, align 4
-  %l4 = load i32, i32* %p1, align 4
+  %l3 = load i32, ptr %p0, align 4
+  %l4 = load i32, ptr %p1, align 4
   %conv5.us = sext i32 %l4 to i64
-  %l5 = load i32, i32* %p2, align 4
+  %l5 = load i32, ptr %p2, align 4
   %conv6.us = sext i32 %l5 to i64
   %mul.us = mul nsw i64 %conv6.us, %conv5.us
   %l6 = lshr i64 %mul.us, 32
   %shl74.us = shl nuw nsw i64 %l6, 1
   %shl.us = trunc i64 %shl74.us to i32
   %add.us = add nsw i32 %l3, %shl.us
-  store i32 %add.us, i32* %pDst, align 4
+  store i32 %add.us, ptr %pDst, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Thumb2/float-ops.ll b/llvm/test/CodeGen/Thumb2/float-ops.ll
index 709bd49f22860..51f18afaf0a46 100644
--- a/llvm/test/CodeGen/Thumb2/float-ops.ll
+++ b/llvm/test/CodeGen/Thumb2/float-ops.ll
@@ -98,41 +98,41 @@ entry:
   ret double %0
 }
 
-define float @load_f(float* %a) {
+define float @load_f(ptr %a) {
 entry:
 ; CHECK-LABEL: load_f:
 ; NONE: ldr r0, [r0]
 ; HARD: vldr s0, [r0]
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   ret float %0
 }
 
-define double @load_d(double* %a) {
+define double @load_d(ptr %a) {
 entry:
 ; CHECK-LABEL: load_d:
 ; NOREGS: ldm r0, {r0, r1}
 ; ONLYREGS: vldr d0, [r0]
 ; HARD: vldr d0, [r0]
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   ret double %0
 }
 
-define void @store_f(float* %a, float %b) {
+define void @store_f(ptr %a, float %b) {
 entry:
 ; CHECK-LABEL: store_f:
 ; NONE: str r1, [r0]
 ; HARD: vstr s0, [r0]
-  store float %b, float* %a, align 4
+  store float %b, ptr %a, align 4
   ret void
 }
 
-define void @store_d(double* %a, double %b) {
+define void @store_d(ptr %a, double %b) {
 entry:
 ; CHECK-LABEL: store_d:
 ; NOREGS: strd r2, r3, [r0]
 ; ONLYREGS: strd r2, r3, [r0]
 ; HARD: vstr d0, [r0]
-  store double %b, double* %a, align 8
+  store double %b, ptr %a, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Thumb2/frameless2.ll b/llvm/test/CodeGen/Thumb2/frameless2.ll
index c8aa82f6030b1..4750527ae555c 100644
--- a/llvm/test/CodeGen/Thumb2/frameless2.ll
+++ b/llvm/test/CodeGen/Thumb2/frameless2.ll
@@ -3,10 +3,10 @@
 %struct.noise3 = type { [3 x [17 x i32]] }
 %struct.noiseguard = type { i32, i32, i32 }
 
-define void @vorbis_encode_noisebias_setup(i8* nocapture %vi.0.7.val, double %s, i32 %block, i32* nocapture %suppress, %struct.noise3* nocapture %in, %struct.noiseguard* nocapture %guard, double %userbias) nounwind {
+define void @vorbis_encode_noisebias_setup(ptr nocapture %vi.0.7.val, double %s, i32 %block, ptr nocapture %suppress, ptr nocapture %in, ptr nocapture %guard, double %userbias) nounwind {
 entry:
-  %0 = getelementptr %struct.noiseguard, %struct.noiseguard* %guard, i32 %block, i32 2; <i32*> [#uses=1]
-  %1 = load i32, i32* %0, align 4                      ; <i32> [#uses=1]
-  store i32 %1, i32* undef, align 4
+  %0 = getelementptr %struct.noiseguard, ptr %guard, i32 %block, i32 2; <ptr> [#uses=1]
+  %1 = load i32, ptr %0, align 4                      ; <i32> [#uses=1]
+  store i32 %1, ptr undef, align 4
   unreachable
 }

diff  --git a/llvm/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll b/llvm/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll
index 8d338dad9bc7e..7149e72033304 100644
--- a/llvm/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll
+++ b/llvm/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll
@@ -7,13 +7,13 @@ declare void @otherfn()
 ; CHECK: itt ne
 ; CHECK: movne
 ; CHECK: strne
-define i32 @triangle1(i32 %n, i32* %p) {
+define i32 @triangle1(i32 %n, ptr %p) {
 entry:
   %tobool = icmp eq i32 %n, 0
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:
-  store i32 1, i32* %p, align 4
+  store i32 1, ptr %p, align 4
   br label %if.end
 
 if.end:
@@ -32,14 +32,14 @@ if.end:
 ; CHECK-NOBP: str
 ; CHECK-NOBP: movs
 ; CHECK-NOBP: str
-define i32 @triangle2(i32 %n, i32* %p, i32* %q) {
+define i32 @triangle2(i32 %n, ptr %p, ptr %q) {
 entry:
   %tobool = icmp eq i32 %n, 0
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:
-  store i32 1, i32* %p, align 4
-  store i32 2, i32* %q, align 4
+  store i32 1, ptr %p, align 4
+  store i32 2, ptr %q, align 4
   br label %if.end
 
 if.end:
@@ -55,16 +55,16 @@ if.end:
 ; CHECK: str
 ; CHECK: movs
 ; CHECK: str
-define i32 @triangle3(i32 %n, i32* %p, i32* %q, i32* %r) {
+define i32 @triangle3(i32 %n, ptr %p, ptr %q, ptr %r) {
 entry:
   %tobool = icmp eq i32 %n, 0
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:
-  store i32 1, i32* %p, align 4
-  store i32 2, i32* %q, align 4
-  store i32 3, i32* %r, align 4
-  store i32 4, i32* %p, align 4
+  store i32 1, ptr %p, align 4
+  store i32 2, ptr %q, align 4
+  store i32 3, ptr %r, align 4
+  store i32 4, ptr %p, align 4
   br label %if.end
 
 if.end:
@@ -76,17 +76,17 @@ if.end:
 ; CHECK: itee eq
 ; CHECK: ldreq
 ; CHECK: strne
-define i32 @diamond1(i32 %n, i32* %p) {
+define i32 @diamond1(i32 %n, ptr %p) {
 entry:
   %tobool = icmp eq i32 %n, 0
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:
-  store i32 %n, i32* %p, align 4
+  store i32 %n, ptr %p, align 4
   br label %if.end
 
 if.else:
-  %0 = load i32, i32* %p, align 4
+  %0 = load i32, ptr %p, align 4
   br label %if.end
 
 if.end:
@@ -107,19 +107,19 @@ if.end:
 ; CHECK-NOBP: addeq
 ; CHECK-NOBP: strne
 ; CHECK-NOBP: strne
-define i32 @diamond2(i32 %n, i32* %p, i32* %q) {
+define i32 @diamond2(i32 %n, ptr %p, ptr %q) {
 entry:
   %tobool = icmp eq i32 %n, 0
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:
-  store i32 %n, i32* %p, align 4
-  %arrayidx = getelementptr inbounds i32, i32* %p, i32 2
-  store i32 %n, i32* %arrayidx, align 4
+  store i32 %n, ptr %p, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %p, i32 2
+  store i32 %n, ptr %arrayidx, align 4
   br label %if.end
 
 if.else:
-  store i32 %n, i32* %q, align 4
+  store i32 %n, ptr %q, align 4
   %0 = add i32 %n, 10
   br label %if.end
 
@@ -137,18 +137,18 @@ if.end:
 ; CHECK: ldr
 ; CHECK: ldr
 ; CHECK: adds
-define i32 @diamond3(i32 %n, i32* %p, i32* %q) {
+define i32 @diamond3(i32 %n, ptr %p, ptr %q) {
 entry:
   %tobool = icmp eq i32 %n, 0
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:
-  store i32 1, i32* %p, align 4
+  store i32 1, ptr %p, align 4
   br label %if.end
 
 if.else:
-  %0 = load i32, i32* %p, align 4
-  %1 = load i32, i32* %q, align 4
+  %0 = load i32, ptr %p, align 4
+  %1 = load i32, ptr %q, align 4
   %add = add nsw i32 %1, %0
   br label %if.end
 

diff --git a/llvm/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll b/llvm/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll
index be539a6c62009..9acdd7e4a8adb 100644
--- a/llvm/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll
+++ b/llvm/test/CodeGen/Thumb2/ifcvt-rescan-bug-2016-08-22.ll
@@ -3,7 +3,7 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 target triple = "thumbv7-unknown-linux-gnueabihf"
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
 
 ; Function Attrs: nounwind
 declare void @_ZNSaIcEC2Ev() unnamed_addr #0 align 2
@@ -25,7 +25,7 @@ define hidden void @_ZN4llvm14DOTGraphTraitsIPNS_13ScheduleDAGMIEE17getEdgeAttri
   br label %3
 
 ; <label>:2:                                      ; preds = %0
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* undef) #0
+  call void @llvm.lifetime.start.p0(i64 1, ptr undef) #0
   call void @_ZNSaIcEC2Ev() #0
   br label %3
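
One detail in the hunk above that goes beyond a plain type rewrite: overloaded intrinsics that used to mangle the pointee type into their name, such as llvm.lifetime.start.p0i8, keep only the address space (llvm.lifetime.start.p0) once pointers are opaque. A minimal sketch of the updated usage, assuming a local alloca rather than this test's undef pointer:

declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
declare void @llvm.lifetime.end.p0(i64, ptr nocapture)

define void @scoped_byte() {
entry:
  %buf = alloca i8, align 1
  ; Same semantics as before; only the .p0i8 suffix in the name changed.
  call void @llvm.lifetime.start.p0(i64 1, ptr %buf)
  call void @llvm.lifetime.end.p0(i64 1, ptr %buf)
  ret void
}

The same renaming shows up further down in machine-licm.ll for llvm.arm.neon.vld1/vst1.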
 

diff --git a/llvm/test/CodeGen/Thumb2/ifcvt-rescan-diamonds.ll b/llvm/test/CodeGen/Thumb2/ifcvt-rescan-diamonds.ll
index 56f2a1ec550fc..931a48b5c7976 100644
--- a/llvm/test/CodeGen/Thumb2/ifcvt-rescan-diamonds.ll
+++ b/llvm/test/CodeGen/Thumb2/ifcvt-rescan-diamonds.ll
@@ -29,7 +29,7 @@ cond.true77:                                      ; preds = %while.cond38
   br i1 %cmp79, label %cond.end84, label %cond.false81
 
 cond.false81:                                     ; preds = %cond.true77
-  %0 = load i32, i32* null, align 4
+  %0 = load i32, ptr null, align 4
   br label %cond.end84
 
 cond.end84:                                       ; preds = %cond.false81, %cond.true77
@@ -45,14 +45,14 @@ cond.false91:                                     ; preds = %cond.false87
 
 cond.false101:                                    ; preds = %cond.false91, %cond.false87, %cond.end84
   %cond97 = phi i32 [ %neg86, %cond.end84 ], [ %b, %cond.false91 ], [ 0, %cond.false87 ]
-  %1 = load i32, i32* null, align 4
+  %1 = load i32, ptr null, align 4
   %and106 = and i32 %cond97, %1
   %and107 = and i32 %and106, 2
   %tobool108 = icmp ne i32 %and107, 0
   br i1 %tobool108, label %if.then109, label %if.end111
 
 if.then109:                                       ; preds = %cond.false101
-  store i32 0, i32* undef, align 4
+  store i32 0, ptr undef, align 4
   br label %if.end111
 
 if.end111:                                        ; preds = %if.then109, %cond.false101

diff --git a/llvm/test/CodeGen/Thumb2/inflate-regs.ll b/llvm/test/CodeGen/Thumb2/inflate-regs.ll
index 4814db281bfef..ea8edab377661 100644
--- a/llvm/test/CodeGen/Thumb2/inflate-regs.ll
+++ b/llvm/test/CodeGen/Thumb2/inflate-regs.ll
@@ -12,12 +12,12 @@ target triple = "thumbv7-apple-ios"
 ; CHECK-NOT: vstr
 ; CHECK: vorr
 ; CHECK: vstr s
-define void @local_split(float* nocapture %p) nounwind ssp {
+define void @local_split(ptr nocapture %p) nounwind ssp {
 entry:
-  %x = load float, float* %p, align 4
+  %x = load float, ptr %p, align 4
   %a = fadd float %x, 1.0
   tail call void asm sideeffect "", "~{d0},~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwind
-  store float %a, float* %p, align 4
+  store float %a, ptr %p, align 4
   ret void
 }
 
@@ -31,19 +31,19 @@ entry:
 ; CHECK-NOT: vstr
 ; CHECK: vorr
 ; CHECK: vstr s
-define void @global_split(float* nocapture %p1, float* nocapture %p2) nounwind ssp {
+define void @global_split(ptr nocapture %p1, ptr nocapture %p2) nounwind ssp {
 entry:
-  %0 = load float, float* %p1, align 4
+  %0 = load float, ptr %p1, align 4
   %add = fadd float %0, 1.000000e+00
   tail call void asm sideeffect "", "~{d0},~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwind
   %cmp = fcmp ogt float %add, 0.000000e+00
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store float %add, float* %p2, align 4
+  store float %add, ptr %p2, align 4
   br label %if.end
 
 if.end:
-  store float %add, float* %p1, align 4
+  store float %add, ptr %p1, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/intrinsics-coprocessor.ll b/llvm/test/CodeGen/Thumb2/intrinsics-coprocessor.ll
index 70f1e5e663cb0..73365f2dd7105 100644
--- a/llvm/test/CodeGen/Thumb2/intrinsics-coprocessor.ll
+++ b/llvm/test/CodeGen/Thumb2/intrinsics-coprocessor.ll
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -mtriple=thumbv7-eabi -mcpu=cortex-a8 -show-mc-encoding | FileCheck %s
-define void @coproc(i8* %i) nounwind {
+define void @coproc(ptr %i) nounwind {
 entry:
   ; CHECK: mrc p7, #1, r{{[0-9]+}}, c1, c1, #4
   %0 = tail call i32 @llvm.arm.mrc(i32 7, i32 1, i32 1, i32 1, i32 4) nounwind
@@ -18,21 +18,21 @@ entry:
   ; CHECK: cdp2 p7, #3, c1, c1, c1, #5
   tail call void @llvm.arm.cdp2(i32 7, i32 3, i32 1, i32 1, i32 1, i32 5) nounwind
   ; CHECK: ldc p7, c3, [r{{[0-9]+}}]
-  tail call void @llvm.arm.ldc(i32 7, i32 3, i8* %i) nounwind
+  tail call void @llvm.arm.ldc(i32 7, i32 3, ptr %i) nounwind
   ; CHECK: ldcl p7, c3, [r{{[0-9]+}}]
-  tail call void @llvm.arm.ldcl(i32 7, i32 3, i8* %i) nounwind
+  tail call void @llvm.arm.ldcl(i32 7, i32 3, ptr %i) nounwind
   ; CHECK: ldc2 p7, c3, [r{{[0-9]+}}]
-  tail call void @llvm.arm.ldc2(i32 7, i32 3, i8* %i) nounwind
+  tail call void @llvm.arm.ldc2(i32 7, i32 3, ptr %i) nounwind
   ; CHECK: ldc2l p7, c3, [r{{[0-9]+}}]
-  tail call void @llvm.arm.ldc2l(i32 7, i32 3, i8* %i) nounwind
+  tail call void @llvm.arm.ldc2l(i32 7, i32 3, ptr %i) nounwind
   ; CHECK: stc p7, c3, [r{{[0-9]+}}]
-  tail call void @llvm.arm.stc(i32 7, i32 3, i8* %i) nounwind
+  tail call void @llvm.arm.stc(i32 7, i32 3, ptr %i) nounwind
   ; CHECK: stcl p7, c3, [r{{[0-9]+}}]
-  tail call void @llvm.arm.stcl(i32 7, i32 3, i8* %i) nounwind
+  tail call void @llvm.arm.stcl(i32 7, i32 3, ptr %i) nounwind
   ; CHECK: stc2 p7, c3, [r{{[0-9]+}}]
-  tail call void @llvm.arm.stc2(i32 7, i32 3, i8* %i) nounwind
+  tail call void @llvm.arm.stc2(i32 7, i32 3, ptr %i) nounwind
   ; CHECK: stc2l p7, c3, [r{{[0-9]+}}]
-  tail call void @llvm.arm.stc2l(i32 7, i32 3, i8* %i) nounwind
+  tail call void @llvm.arm.stc2l(i32 7, i32 3, ptr %i) nounwind
   ; CHECK: mrrc p1, #2, r{{[0-9]+}}, r{{[0-9]+}}, c3
   %2 = tail call { i32, i32 } @llvm.arm.mrrc(i32 1, i32 2, i32 3) nounwind
   ; CHECK: mrrc2 p1, #2, r{{[0-9]+}}, r{{[0-9]+}}, c3
@@ -56,21 +56,21 @@ if.end:
   ret void
 }
 
-declare void @llvm.arm.ldc(i32, i32, i8*) nounwind
+declare void @llvm.arm.ldc(i32, i32, ptr) nounwind
 
-declare void @llvm.arm.ldcl(i32, i32, i8*) nounwind
+declare void @llvm.arm.ldcl(i32, i32, ptr) nounwind
 
-declare void @llvm.arm.ldc2(i32, i32, i8*) nounwind
+declare void @llvm.arm.ldc2(i32, i32, ptr) nounwind
 
-declare void @llvm.arm.ldc2l(i32, i32, i8*) nounwind
+declare void @llvm.arm.ldc2l(i32, i32, ptr) nounwind
 
-declare void @llvm.arm.stc(i32, i32, i8*) nounwind
+declare void @llvm.arm.stc(i32, i32, ptr) nounwind
 
-declare void @llvm.arm.stcl(i32, i32, i8*) nounwind
+declare void @llvm.arm.stcl(i32, i32, ptr) nounwind
 
-declare void @llvm.arm.stc2(i32, i32, i8*) nounwind
+declare void @llvm.arm.stc2(i32, i32, ptr) nounwind
 
-declare void @llvm.arm.stc2l(i32, i32, i8*) nounwind
+declare void @llvm.arm.stc2l(i32, i32, ptr) nounwind
 
 declare void @llvm.arm.cdp2(i32, i32, i32, i32, i32, i32) nounwind
 

diff --git a/llvm/test/CodeGen/Thumb2/large-call.ll b/llvm/test/CodeGen/Thumb2/large-call.ll
index 9ec622ef6fbb3..11a2d04acd909 100644
--- a/llvm/test/CodeGen/Thumb2/large-call.ll
+++ b/llvm/test/CodeGen/Thumb2/large-call.ll
@@ -20,11 +20,11 @@ target triple = "thumbv7-apple-ios0.0.0"
 define i32 @main() ssp {
 entry:
   %d = alloca double, align 8
-  store double 1.000000e+00, double* %d, align 8
-  %0 = load double, double* %d, align 8
-  call void (i8*, i8*, i8*, ...) @variadic(i8* null, i8* null, i8* null, i32 1, double 1.234800e+03, double 2.363450e+03, double %0, i32 1, double 1.234560e+03, double 2.345670e+03, double 4.6334563e+03, double 2.423440e+03, double 4.234330e+03, double 2.965430e+03, i32 1, double 4.669300e+03, double 2.927500e+03, double 4.663100e+03, double 2.921000e+03, double 4.663100e+03, double 2.345100e+03, i32 1, double 3.663100e+03, double 2.905100e+03, double 4.669300e+03, double 2.898600e+03, double 4.676900e+03, double 2.898600e+03, i32 1, double 4.684600e+03, double 2.898600e+03, double 1.234800e+03, double 2.905100e+03, double 1.234800e+03, double 2.345100e+03, i32 1, double 7.719700e+03, double 2.920500e+03, double 4.713500e+03, double 2.927000e+03, double 4.705800e+03, double 2.927000e+03, i32 1, double 8.698200e+03, double 2.927000e+03, double 4.692000e+03, double 2.920500e+03, double 4.692000e+03, double 2.912500e+03, i32 1, double 4.692000e+03, double 2.945600e+03, double 4.698200e+03, double 2.898100e+03, double 4.705800e+03, double 2.898100e+03, i32 1, double 4.713500e+03, double 2.898100e+03, double 4.719700e+03, double 2.945600e+03, double 4.719700e+03, double 2.912500e+03, i32 1, double 4.749200e+03, double 2.920100e+03, double 4.743000e+03, double 2.926600e+03, double 4.735300e+03, double 2.926600e+03, i32 1, double 4.727700e+03, double 2.926600e+03, double 4.721500e+03, double 2.920100e+03, double 4.721500e+03, double 2.912100e+03, i32 1, double 4.721500e+03, double 2.945100e+03, double 4.727700e+03, double 2.897700e+03, double 4.735300e+03, double 2.897700e+03, i32 1, double 4.743000e+03, double 2.897700e+03, double 4.749200e+03, double 2.945100e+03, double 4.749200e+03, double 2.912100e+03, i32 1, double 4.778200e+03, double 2.920100e+03, double 4.772000e+03, double 2.926600e+03, double 4.764300e+03, double 2.926600e+03, i32 1, double 4.756700e+03, double 2.926600e+03, double 4.750500e+03, double 2.920100e+03, double 4.750500e+03, double 2.912100e+03, i32 1, double 4.750500e+03, double 2.945100e+03, double 4.756700e+03, double 2.897700e+03, double 4.764300e+03, double 2.897700e+03, i32 1, double 4.772000e+03, double 2.897700e+03, double 4.778200e+03, double 2.945100e+03, double 4.778200e+03, double 2.912100e+03, i32 1, double 4.801900e+03, double 2.942100e+03, double 4.795700e+03, double 2.948500e+03, double 4.788100e+03, double 2.948500e+03, i32 1, double 4.780500e+03, double 2.948500e+03, double 4.774300e+03, double 2.942100e+03, double 4.774300e+03, double 2.934100e+03, i32 1, double 4.774300e+03, double 2.926100e+03, double 4.780500e+03, double 2.919600e+03, double 4.788100e+03, double 2.919600e+03, i32 1, double 4.795700e+03, double 2.919600e+03, double 4.801900e+03, double 2.926100e+03, double 4.801900e+03, double 2.934100e+03, i32 1, double 4.801500e+03, double 2.972500e+03, double 4.795300e+03, double 2.978900e+03, double 4.787700e+03, double 2.978900e+03, i32 1, double 4.780000e+03, double 2.978900e+03, double 4.773800e+03, double 2.972500e+03, double 4.773800e+03, double 2.964500e+03, i32 1, double 4.773800e+03, double 2.956500e+03, double 4.780000e+03, double 2.950000e+03, double 4.787700e+03, double 2.950000e+03, i32 1, double 4.795300e+03, double 2.950000e+03, double 4.801500e+03, double 2.956500e+03, double 4.801500e+03, double 2.964500e+03, i32 1, double 4.802400e+03, double 3.010200e+03, double 4.796200e+03, double 3.016600e+03, double 4.788500e+03, double 3.016600e+03, i32 1, double 4.780900e+03, double 3.016600e+03, double 4.774700e+03, double 3.010200e+03, double 4.774700e+03, double 3.002200e+03, i32 1, double 4.774700e+03, double 2.994200e+03, double 4.780900e+03, double 2.987700e+03, double 4.788500e+03, double 2.987700e+03, i32 1, double 4.796200e+03, double 2.987700e+03, double 4.802400e+03, double 2.994200e+03, double 4.802400e+03, double 3.002200e+03, i32 1, double 4.802400e+03, double 3.039400e+03, double 4.796200e+03, double 3.455800e+03, double 4.788500e+03, double 3.455800e+03, i32 1, double 4.780900e+03, double 3.455800e+03, double 4.774700e+03, double 3.039400e+03, double 4.774700e+03, double 3.031400e+03, i32 1, double 4.774700e+03, double 3.023400e+03, double 4.780900e+03, double 3.016900e+03, double 4.788500e+03, double 3.016900e+03, i32 1, double 4.796200e+03, double 3.016900e+03, double 4.802400e+03, double 3.023400e+03, double 4.802400e+03, double 3.031400e+03, i32 1, double 4.778600e+03, double 3.063100e+03, double 4.772400e+03, double 3.069600e+03, double 4.764700e+03, double 3.069600e+03, i32 1, double 4.757100e+03, double 3.069600e+03, double 4.750900e+03, double 3.063100e+03, double 4.750900e+03, double 3.055100e+03, i32 1, double 4.750900e+03, double 3.457100e+03, double 4.757100e+03, double 3.450700e+03, double 4.764700e+03, double 3.450700e+03, i32 1, double 4.772400e+03, double 3.450700e+03, double 4.778600e+03, double 3.457100e+03, double 4.778600e+03, double 3.055100e+03, i32 1, double 4.748600e+03, double 3.063600e+03, double 4.742400e+03, double 3.070000e+03, double 4.734700e+03, double 3.070000e+03, i32 1, double 4.727100e+03, double 3.070000e+03, double 4.720900e+03, double 3.063600e+03, double 4.720900e+03, double 3.055600e+03, i32 1, double 4.720900e+03, double 3.457600e+03, double 4.727100e+03, double 3.451100e+03, double 4.734700e+03, double 3.451100e+03, i32 1, double 4.742400e+03, double 3.451100e+03, double 4.748600e+03, double 3.457600e+03, double 4.748600e+03, double 3.055600e+03, i32 1, double 4.719500e+03, double 3.063600e+03, double 4.713300e+03, double 3.070000e+03, double 4.705700e+03, double 3.070000e+03, i32 1, double 4.698000e+03, double 3.070000e+03, double 4.691900e+03, double 3.063600e+03, double 4.691900e+03, double 3.055600e+03, i32 1, double 4.691900e+03, double 3.457600e+03, double 4.698000e+03, double 3.451100e+03, double 4.705700e+03, double 3.451100e+03, i32 1, double 4.713300e+03, double 3.451100e+03, double 4.719500e+03, double 3.457600e+03, double 4.719500e+03, double 3.055600e+03, i32 1, double 4.691300e+03, double 3.064000e+03, double 4.685100e+03, double 3.070500e+03, double 4.677500e+03, double 3.070500e+03, i32 1, double 4.669900e+03, double 3.070500e+03, double 4.663700e+03, double 3.064000e+03, double 4.663700e+03, double 3.056000e+03, i32 1, double 4.663700e+03, double 3.458000e+03, double 4.669900e+03, double 3.451600e+03, double 4.677500e+03, double 3.451600e+03, i32 1, double 4.685100e+03, double 3.451600e+03, double 4.691300e+03, double 3.458000e+03, double 4.691300e+03, double 3.056000e+03, i32 1, double 4.668500e+03, double 3.453000e+03, double 4.662300e+03, double 3.459400e+03, double 4.654700e+03, double 3.459400e+03, i32 1, double 4.647000e+03, double 3.459400e+03, double 4.640900e+03, double 3.453000e+03, double 4.640900e+03, double 3.035000e+03, i32 1, double 4.640900e+03, double 3.027000e+03, double 4.647000e+03, double 3.020500e+03, double 4.654700e+03, double 3.020500e+03, i32 1, double 4.662300e+03, double 3.020500e+03, double 4.668500e+03, double 3.027000e+03, double 4.668500e+03, double 3.035000e+03, i32 1, double 4.668500e+03, double 3.014300e+03, double 4.662300e+03, double 3.020800e+03, double 4.654700e+03, double 3.020800e+03, i32 1, double 4.647000e+03, double 3.020800e+03, double 4.640900e+03, double 3.014300e+03, double 4.640900e+03, double 3.006400e+03, i32 1, double 4.640900e+03, double 2.998400e+03, double 4.647000e+03, double 2.991900e+03, double 4.654700e+03, double 2.991900e+03, i32 1, double 4.662300e+03, double 2.991900e+03, double 4.668500e+03, double 2.998400e+03, double 4.668500e+03, double 3.006400e+03, i32 1, double 4.668100e+03, double 2.941100e+03, double 4.661900e+03, double 2.947600e+03, double 4.654200e+03, double 2.947600e+03, i32 1, double 4.646600e+03, double 2.947600e+03, double 4.640400e+03, double 2.941100e+03, double 4.640400e+03, double 2.933100e+03, i32 1, double 4.640400e+03, double 2.925200e+03, double 4.646600e+03, double 2.918700e+03, double 4.654200e+03, double 2.918700e+03, i32 1, double 4.661900e+03, double 2.918700e+03, double 4.668100e+03, double 2.925200e+03, double 4.668100e+03, double 2.933100e+03, i32 1, double 4.668500e+03, double 2.971600e+03, double 4.662300e+03, double 2.978100e+03, double 4.654700e+03, double 2.978100e+03, i32 1, double 4.647000e+03, double 2.978100e+03, double 4.640900e+03, double 2.971600e+03, double 4.640900e+03, double 2.963600e+03, i32 1, double 4.640900e+03, double 2.955700e+03, double 4.647000e+03, double 2.949200e+03, double 4.654700e+03, double 2.949200e+03, i32 1, double 4.662300e+03, double 2.949200e+03, double 4.668500e+03, double 2.955700e+03, double 4.668500e+03, double 2.963600e+03, i32 2, i32 1, double 4.691300e+03, double 3.056000e+03, i32 2, i32 1, double 4.748600e+03, double 3.055600e+03, i32 2, i32 1, double 4.778200e+03, double 2.912100e+03, i32 2, i32 1, double 4.749200e+03, double 2.912100e+03, i32 2, i32 1, double 4.802400e+03, double 3.031400e+03, i32 2, i32 1, double 4.778600e+03, double 3.055100e+03, i32 2, i32 1, double 4.801500e+03, double 2.964500e+03, i32 2, i32 1, double 4.802400e+03, double 3.002200e+03, i32 2, i32 1, double 4.719700e+03, double 2.912500e+03, i32 2, i32 1, double 4.801900e+03, double 2.934100e+03, i32 2, i32 1, double 4.719500e+03, double 3.055600e+03, i32 2, i32 1, double 4.668500e+03, double 3.006400e+03, i32 2, i32 1, double 4.668500e+03, double 3.035000e+03, i32 2, i32 1, double 4.668100e+03, double 2.933100e+03, i32 2, i32 1, double 4.668500e+03, double 2.963600e+03, i32 2, i32 48)
+  store double 1.000000e+00, ptr %d, align 8
+  %0 = load double, ptr %d, align 8
+  call void (ptr, ptr, ptr, ...) @variadic(ptr null, ptr null, ptr null, i32 1, double 1.234800e+03, double 2.363450e+03, double %0, i32 1, double 1.234560e+03, double 2.345670e+03, double 4.6334563e+03, double 2.423440e+03, double 4.234330e+03, double 2.965430e+03, i32 1, double 4.669300e+03, double 2.927500e+03, double 4.663100e+03, double 2.921000e+03, double 4.663100e+03, double 2.345100e+03, i32 1, double 3.663100e+03, double 2.905100e+03, double 4.669300e+03, double 2.898600e+03, double 4.676900e+03, double 2.898600e+03, i32 1, double 4.684600e+03, double 2.898600e+03, double 1.234800e+03, double 2.905100e+03, double 1.234800e+03, double 2.345100e+03, i32 1, double 7.719700e+03, double 2.920500e+03, double 4.713500e+03, double 2.927000e+03, double 4.705800e+03, double 2.927000e+03, i32 1, double 8.698200e+03, double 2.927000e+03, double 4.692000e+03, double 2.920500e+03, double 4.692000e+03, double 2.912500e+03, i32 1, double 4.692000e+03, double 2.945600e+03, double 4.698200e+03, double 2.898100e+03, double 4.705800e+03, double 2.898100e+03, i32 1, double 4.713500e+03, double 2.898100e+03, double 4.719700e+03, double 2.945600e+03, double 4.719700e+03, double 2.912500e+03, i32 1, double 4.749200e+03, double 2.920100e+03, double 4.743000e+03, double 2.926600e+03, double 4.735300e+03, double 2.926600e+03, i32 1, double 4.727700e+03, double 2.926600e+03, double 4.721500e+03, double 2.920100e+03, double 4.721500e+03, double 2.912100e+03, i32 1, double 4.721500e+03, double 2.945100e+03, double 4.727700e+03, double 2.897700e+03, double 4.735300e+03, double 2.897700e+03, i32 1, double 4.743000e+03, double 2.897700e+03, double 4.749200e+03, double 2.945100e+03, double 4.749200e+03, double 2.912100e+03, i32 1, double 4.778200e+03, double 2.920100e+03, double 4.772000e+03, double 2.926600e+03, double 4.764300e+03, double 2.926600e+03, i32 1, double 4.756700e+03, double 2.926600e+03, double 4.750500e+03, double 2.920100e+03, double 4.750500e+03, double 2.912100e+03, i32 1, double 4.750500e+03, double 2.945100e+03, double 4.756700e+03, double 2.897700e+03, double 4.764300e+03, double 2.897700e+03, i32 1, double 4.772000e+03, double 2.897700e+03, double 4.778200e+03, double 2.945100e+03, double 4.778200e+03, double 2.912100e+03, i32 1, double 4.801900e+03, double 2.942100e+03, double 4.795700e+03, double 2.948500e+03, double 4.788100e+03, double 2.948500e+03, i32 1, double 4.780500e+03, double 2.948500e+03, double 4.774300e+03, double 2.942100e+03, double 4.774300e+03, double 2.934100e+03, i32 1, double 4.774300e+03, double 2.926100e+03, double 4.780500e+03, double 2.919600e+03, double 4.788100e+03, double 2.919600e+03, i32 1, double 4.795700e+03, double 2.919600e+03, double 4.801900e+03, double 2.926100e+03, double 4.801900e+03, double 2.934100e+03, i32 1, double 4.801500e+03, double 2.972500e+03, double 4.795300e+03, double 2.978900e+03, double 4.787700e+03, double 2.978900e+03, i32 1, double 4.780000e+03, double 2.978900e+03, double 4.773800e+03, double 2.972500e+03, double 4.773800e+03, double 2.964500e+03, i32 1, double 4.773800e+03, double 2.956500e+03, double 4.780000e+03, double 2.950000e+03, double 4.787700e+03, double 2.950000e+03, i32 1, double 4.795300e+03, double 2.950000e+03, double 4.801500e+03, double 2.956500e+03, double 4.801500e+03, double 2.964500e+03, i32 1, double 4.802400e+03, double 3.010200e+03, double 4.796200e+03, double 3.016600e+03, double 4.788500e+03, double 3.016600e+03, i32 1, double 4.780900e+03, double 3.016600e+03, double 4.774700e+03, double 3.010200e+03, double 4.774700e+03, double 3.002200e+03, i32 1, double 4.774700e+03, double 2.994200e+03, double 4.780900e+03, double 2.987700e+03, double 4.788500e+03, double 2.987700e+03, i32 1, double 4.796200e+03, double 2.987700e+03, double 4.802400e+03, double 2.994200e+03, double 4.802400e+03, double 3.002200e+03, i32 1, double 4.802400e+03, double 3.039400e+03, double 4.796200e+03, double 3.455800e+03, double 4.788500e+03, double 3.455800e+03, i32 1, double 4.780900e+03, double 3.455800e+03, double 4.774700e+03, double 3.039400e+03, double 4.774700e+03, double 3.031400e+03, i32 1, double 4.774700e+03, double 3.023400e+03, double 4.780900e+03, double 3.016900e+03, double 4.788500e+03, double 3.016900e+03, i32 1, double 4.796200e+03, double 3.016900e+03, double 4.802400e+03, double 3.023400e+03, double 4.802400e+03, double 3.031400e+03, i32 1, double 4.778600e+03, double 3.063100e+03, double 4.772400e+03, double 3.069600e+03, double 4.764700e+03, double 3.069600e+03, i32 1, double 4.757100e+03, double 3.069600e+03, double 4.750900e+03, double 3.063100e+03, double 4.750900e+03, double 3.055100e+03, i32 1, double 4.750900e+03, double 3.457100e+03, double 4.757100e+03, double 3.450700e+03, double 4.764700e+03, double 3.450700e+03, i32 1, double 4.772400e+03, double 3.450700e+03, double 4.778600e+03, double 3.457100e+03, double 4.778600e+03, double 3.055100e+03, i32 1, double 4.748600e+03, double 3.063600e+03, double 4.742400e+03, double 3.070000e+03, double 4.734700e+03, double 3.070000e+03, i32 1, double 4.727100e+03, double 3.070000e+03, double 4.720900e+03, double 3.063600e+03, double 4.720900e+03, double 3.055600e+03, i32 1, double 4.720900e+03, double 3.457600e+03, double 4.727100e+03, double 3.451100e+03, double 4.734700e+03, double 3.451100e+03, i32 1, double 4.742400e+03, double 3.451100e+03, double 4.748600e+03, double 3.457600e+03, double 4.748600e+03, double 3.055600e+03, i32 1, double 4.719500e+03, double 3.063600e+03, double 4.713300e+03, double 3.070000e+03, double 4.705700e+03, double 3.070000e+03, i32 1, double 4.698000e+03, double 3.070000e+03, double 4.691900e+03, double 3.063600e+03, double 4.691900e+03, double 3.055600e+03, i32 1, double 4.691900e+03, double 3.457600e+03, double 4.698000e+03, double 3.451100e+03, double 4.705700e+03, double 3.451100e+03, i32 1, double 4.713300e+03, double 3.451100e+03, double 4.719500e+03, double 3.457600e+03, double 4.719500e+03, double 3.055600e+03, i32 1, double 4.691300e+03, double 3.064000e+03, double 4.685100e+03, double 3.070500e+03, double 4.677500e+03, double 3.070500e+03, i32 1, double 4.669900e+03, double 3.070500e+03, double 4.663700e+03, double 3.064000e+03, double 4.663700e+03, double 3.056000e+03, i32 1, double 4.663700e+03, double 3.458000e+03, double 4.669900e+03, double 3.451600e+03, double 4.677500e+03, double 3.451600e+03, i32 1, double 4.685100e+03, double 3.451600e+03, double 4.691300e+03, double 3.458000e+03, double 4.691300e+03, double 3.056000e+03, i32 1, double 4.668500e+03, double 3.453000e+03, double 4.662300e+03, double 3.459400e+03, double 4.654700e+03, double 3.459400e+03, i32 1, double 4.647000e+03, double 3.459400e+03, double 4.640900e+03, double 3.453000e+03, double 4.640900e+03, double 3.035000e+03, i32 1, double 4.640900e+03, double 3.027000e+03, double 4.647000e+03, double 3.020500e+03, double 4.654700e+03, double 3.020500e+03, i32 1, double 4.662300e+03, double 3.020500e+03, double 4.668500e+03, double 3.027000e+03, double 4.668500e+03, double 3.035000e+03, i32 1, double 4.668500e+03, double 3.014300e+03, double 4.662300e+03, double 3.020800e+03, double 4.654700e+03, double 3.020800e+03, i32 1, double 4.647000e+03, double 3.020800e+03, double 4.640900e+03, double 3.014300e+03, double 4.640900e+03, double 3.006400e+03, i32 1, double 4.640900e+03, double 2.998400e+03, double 4.647000e+03, double 2.991900e+03, double 4.654700e+03, double 2.991900e+03, i32 1, double 4.662300e+03, double 2.991900e+03, double 4.668500e+03, double 2.998400e+03, double 4.668500e+03, double 3.006400e+03, i32 1, double 4.668100e+03, double 2.941100e+03, double 4.661900e+03, double 2.947600e+03, double 4.654200e+03, double 2.947600e+03, i32 1, double 4.646600e+03, double 2.947600e+03, double 4.640400e+03, double 2.941100e+03, double 4.640400e+03, double 2.933100e+03, i32 1, double 4.640400e+03, double 2.925200e+03, double 4.646600e+03, double 2.918700e+03, double 4.654200e+03, double 2.918700e+03, i32 1, double 4.661900e+03, double 2.918700e+03, double 4.668100e+03, double 2.925200e+03, double 4.668100e+03, double 2.933100e+03, i32 1, double 4.668500e+03, double 2.971600e+03, double 4.662300e+03, double 2.978100e+03, double 4.654700e+03, double 2.978100e+03, i32 1, double 4.647000e+03, double 2.978100e+03, double 4.640900e+03, double 2.971600e+03, double 4.640900e+03, double 2.963600e+03, i32 1, double 4.640900e+03, double 2.955700e+03, double 4.647000e+03, double 2.949200e+03, double 4.654700e+03, double 2.949200e+03, i32 1, double 4.662300e+03, double 2.949200e+03, double 4.668500e+03, double 2.955700e+03, double 4.668500e+03, double 2.963600e+03, i32 2, i32 1, double 4.691300e+03, double 3.056000e+03, i32 2, i32 1, double 4.748600e+03, double 3.055600e+03, i32 2, i32 1, double 4.778200e+03, double 2.912100e+03, i32 2, i32 1, double 4.749200e+03, double 2.912100e+03, i32 2, i32 1, double 4.802400e+03, double 3.031400e+03, i32 2, i32 1, double 4.778600e+03, double 3.055100e+03, i32 2, i32 1, double 4.801500e+03, double 2.964500e+03, i32 2, i32 1, double 4.802400e+03, double 3.002200e+03, i32 2, i32 1, double 4.719700e+03, double 2.912500e+03, i32 2, i32 1, double 4.801900e+03, double 2.934100e+03, i32 2, i32 1, double 4.719500e+03, double 3.055600e+03, i32 2, i32 1, double 4.668500e+03, double 3.006400e+03, i32 2, i32 1, double 4.668500e+03, double 3.035000e+03, i32 2, i32 1, double 4.668100e+03, double 2.933100e+03, i32 2, i32 1, double 4.668500e+03, double 2.963600e+03, i32 2, i32 48)
   ret i32 0
 }
 
-declare void @variadic(i8*, i8*, i8*, ...)
+declare void @variadic(ptr, ptr, ptr, ...)
 

diff --git a/llvm/test/CodeGen/Thumb2/large-stack.ll b/llvm/test/CodeGen/Thumb2/large-stack.ll
index 158be9f9f47b0..a6ebd2ecfd3e6 100644
--- a/llvm/test/CodeGen/Thumb2/large-stack.ll
+++ b/llvm/test/CodeGen/Thumb2/large-stack.ll
@@ -35,7 +35,7 @@ define i32 @test3() {
     %retval = alloca i32, align 4
     %tmp = alloca i32, align 4
     %a = alloca [805306369 x i8], align 16
-    store i32 0, i32* %tmp
-    %tmp1 = load i32, i32* %tmp
+    store i32 0, ptr %tmp
+    %tmp1 = load i32, ptr %tmp
     ret i32 %tmp1
 }

diff --git a/llvm/test/CodeGen/Thumb2/ldr-str-imm12.ll b/llvm/test/CodeGen/Thumb2/ldr-str-imm12.ll
index e6beb751fbb85..4ac7b511db89f 100644
--- a/llvm/test/CodeGen/Thumb2/ldr-str-imm12.ll
+++ b/llvm/test/CodeGen/Thumb2/ldr-str-imm12.ll
@@ -4,12 +4,12 @@
 
 %0 = type { i16, i8, i8 }
 %1 = type { [2 x i32], [2 x i32] }
-%2 = type { %union.rec* }
+%2 = type { ptr }
 %struct.FILE_POS = type { i8, i8, i16, i32 }
 %struct.GAP = type { i8, i8, i16 }
-%struct.LIST = type { %union.rec*, %union.rec* }
+%struct.LIST = type { ptr, ptr }
 %struct.STYLE = type { %union.anon, %union.anon, i16, i16, i32 }
-%struct.head_type = type { [2 x %struct.LIST], %union.FIRST_UNION, %union.SECOND_UNION, %union.THIRD_UNION, %union.FOURTH_UNION, %union.rec*, %2, %union.rec*, %union.rec*, %union.rec*, %union.rec*, %union.rec*, %union.rec*, %union.rec*, %union.rec*, i32 }
+%struct.head_type = type { [2 x %struct.LIST], %union.FIRST_UNION, %union.SECOND_UNION, %union.THIRD_UNION, %union.FOURTH_UNION, ptr, %2, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32 }
 %union.FIRST_UNION = type { %struct.FILE_POS }
 %union.FOURTH_UNION = type { %struct.STYLE }
 %union.SECOND_UNION = type { %0 }
@@ -17,10 +17,10 @@
 %union.anon = type { %struct.GAP }
 %union.rec = type { %struct.head_type }
 
-@zz_hold = external global %union.rec*            ; <%union.rec**> [#uses=2]
-@zz_res = external global %union.rec*             ; <%union.rec**> [#uses=1]
+@zz_hold = external global ptr            ; <ptr> [#uses=2]
+@zz_res = external global ptr             ; <ptr> [#uses=1]
 
-define %union.rec* @Manifest(%union.rec* %x, %union.rec* %env, %struct.STYLE* %style, %union.rec** %bthr, %union.rec** %fthr, %union.rec** %target, %union.rec** %crs, i32 %ok, i32 %need_expand, %union.rec** %enclose, i32 %fcr) nounwind {
+define ptr @Manifest(ptr %x, ptr %env, ptr %style, ptr %bthr, ptr %fthr, ptr %target, ptr %crs, i32 %ok, i32 %need_expand, ptr %enclose, i32 %fcr) nounwind {
 ; CHECK-LABEL: Manifest:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    push {r4, r5, r6, r7, lr}
@@ -79,8 +79,8 @@ define %union.rec* @Manifest(%union.rec* %x, %union.rec* %env, %struct.STYLE* %s
 ; CHECK-NEXT:  LBB0_5: @ %bb20
 ; CHECK-NEXT:    trap
 entry:
-  %xgaps.i = alloca [32 x %union.rec*], align 4   ; <[32 x %union.rec*]*> [#uses=0]
-  %ycomp.i = alloca [32 x %union.rec*], align 4   ; <[32 x %union.rec*]*> [#uses=0]
+  %xgaps.i = alloca [32 x ptr], align 4   ; <ptr> [#uses=0]
+  %ycomp.i = alloca [32 x ptr], align 4   ; <ptr> [#uses=0]
   br label %bb20
 
 bb20:                                             ; preds = %entry
@@ -101,17 +101,17 @@ bb119:                                            ; preds = %bb20, %bb20
   unreachable
 
 bb420:                                            ; preds = %bb20, %bb20
-  store volatile %union.rec* null, %union.rec** @zz_hold, align 4
-  store %union.rec* null, %union.rec** @zz_res, align 4
-  store volatile %union.rec* %x, %union.rec** @zz_hold, align 4
-  %0 = call  %union.rec* @Manifest(%union.rec* undef, %union.rec* %env, %struct.STYLE* %style, %union.rec** %bthr, %union.rec** %fthr, %union.rec** %target, %union.rec** %crs, i32 %ok, i32 %need_expand, %union.rec** %enclose, i32 %fcr) nounwind ; <%union.rec*> [#uses=0]
+  store volatile ptr null, ptr @zz_hold, align 4
+  store ptr null, ptr @zz_res, align 4
+  store volatile ptr %x, ptr @zz_hold, align 4
+  %0 = call  ptr @Manifest(ptr undef, ptr %env, ptr %style, ptr %bthr, ptr %fthr, ptr %target, ptr %crs, i32 %ok, i32 %need_expand, ptr %enclose, i32 %fcr) nounwind ; <ptr> [#uses=0]
   unreachable
 
 bb438:                                            ; preds = %bb20, %bb20
   unreachable
 
 bb533:                                            ; preds = %bb20
-  ret %union.rec* %x
+  ret ptr %x
 
 bb569:                                            ; preds = %bb20
   unreachable

diff --git a/llvm/test/CodeGen/Thumb2/lsll0.ll b/llvm/test/CodeGen/Thumb2/lsll0.ll
index 4e9735be077af..f6e75894d0063 100644
--- a/llvm/test/CodeGen/Thumb2/lsll0.ll
+++ b/llvm/test/CodeGen/Thumb2/lsll0.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -enable-arm-maskedgatscat=false -verify-machineinstrs %s -o - | FileCheck %s
 
-define void @_Z4loopPxS_iS_i(i64* %d) {
+define void @_Z4loopPxS_iS_i(ptr %d) {
 ; CHECK-LABEL: _Z4loopPxS_iS_i:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -17,7 +17,7 @@ define void @_Z4loopPxS_iS_i(i64* %d) {
 ; CHECK-NEXT:    strd r1, r12, [r0, #8]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <2 x i64>, <2 x i64>* undef, align 8
+  %wide.load = load <2 x i64>, ptr undef, align 8
   %0 = trunc <2 x i64> %wide.load to <2 x i32>
   %1 = shl <2 x i32> %0, <i32 16, i32 16>
   %2 = ashr exact <2 x i32> %1, <i32 16, i32 16>
@@ -30,8 +30,8 @@ entry:
   %9 = trunc i64 %8 to i16
   %10 = sub i16 0, %9
   %11 = sext i16 %10 to i64
-  %12 = getelementptr inbounds i64, i64* %d, i64 undef
-  store i64 %11, i64* %12, align 8
+  %12 = getelementptr inbounds i64, ptr %d, i64 undef
+  store i64 %11, ptr %12, align 8
   %13 = extractelement <2 x i32> %5, i32 1
   %14 = zext i32 %13 to i64
   %15 = select i1 false, i64 %14, i64 undef
@@ -40,7 +40,7 @@ entry:
   %18 = sext i16 %17 to i64
   %19 = or i32 0, 1
   %20 = sext i32 %19 to i64
-  %21 = getelementptr inbounds i64, i64* %d, i64 %20
-  store i64 %18, i64* %21, align 8
+  %21 = getelementptr inbounds i64, ptr %d, i64 %20
+  store i64 %18, ptr %21, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/lsr-deficiency.ll b/llvm/test/CodeGen/Thumb2/lsr-deficiency.ll
index bd1be6b6f85ed..886f8440325f3 100644
--- a/llvm/test/CodeGen/Thumb2/lsr-deficiency.ll
+++ b/llvm/test/CodeGen/Thumb2/lsr-deficiency.ll
@@ -3,14 +3,14 @@
 
 ; This now reduces to a single induction variable.
 
-@G = external global i32                          ; <i32*> [#uses=2]
-@array = external global i32*                     ; <i32**> [#uses=1]
+@G = external global i32                          ; <ptr> [#uses=2]
+@array = external global ptr                     ; <ptr> [#uses=1]
 
 define void @t() nounwind optsize {
 ; CHECK-LABEL: t:
 ; CHECK: mov{{.*}}, #1000
 entry:
-  %.pre = load i32, i32* @G, align 4                   ; <i32> [#uses=1]
+  %.pre = load i32, ptr @G, align 4                   ; <i32> [#uses=1]
   br label %bb
 
 bb:                                               ; preds = %bb, %entry
@@ -22,11 +22,11 @@ bb:                                               ; preds = %bb, %entry
   %0 = phi i32 [ %.pre, %entry ], [ %3, %bb ]     ; <i32> [#uses=1]
   %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
   %tmp5 = sub i32 1000, %indvar                   ; <i32> [#uses=1]
-  %1 = load i32*, i32** @array, align 4                 ; <i32*> [#uses=1]
-  %scevgep = getelementptr i32, i32* %1, i32 %tmp5     ; <i32*> [#uses=1]
-  %2 = load i32, i32* %scevgep, align 4                ; <i32> [#uses=1]
+  %1 = load ptr, ptr @array, align 4                 ; <ptr> [#uses=1]
+  %scevgep = getelementptr i32, ptr %1, i32 %tmp5     ; <ptr> [#uses=1]
+  %2 = load i32, ptr %scevgep, align 4                ; <i32> [#uses=1]
   %3 = add nsw i32 %2, %0                         ; <i32> [#uses=2]
-  store i32 %3, i32* @G, align 4
+  store i32 %3, ptr @G, align 4
   %indvar.next = add i32 %indvar, 1               ; <i32> [#uses=2]
   %exitcond = icmp eq i32 %indvar.next, 1001      ; <i1> [#uses=1]
   br i1 %exitcond, label %return, label %bb

diff --git a/llvm/test/CodeGen/Thumb2/m4-sched-regs.ll b/llvm/test/CodeGen/Thumb2/m4-sched-regs.ll
index 29952feff0708..85a7f95f82630 100644
--- a/llvm/test/CodeGen/Thumb2/m4-sched-regs.ll
+++ b/llvm/test/CodeGen/Thumb2/m4-sched-regs.ll
@@ -4,10 +4,10 @@
 target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
 target triple = "thumbv7em-arm-none-eabi"
 
-%struct.a = type { i32, %struct.b*, i8, i8, i8, i8, i8*, %struct.b*, i16, i16, i16, i16, i16, i16, i16, i16, i32, i32, i32, i32, i32, i32, i32 }
+%struct.a = type { i32, ptr, i8, i8, i8, i8, ptr, ptr, i16, i16, i16, i16, i16, i16, i16, i16, i32, i32, i32, i32, i32, i32, i32 }
 %struct.b = type { i8, i8, i8, i8, i32, i16, i16, i32, i32, i32, i32, [16 x i8], [64 x i8], [128 x i8], i32, [68 x i8] }
 
-define void @test(%struct.a* nocapture %dhcp, i16 zeroext %value) #0 {
+define void @test(ptr nocapture %dhcp, i16 zeroext %value) #0 {
 ; CHECK-LABEL: test:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrh r2, [r0, #20]
@@ -27,23 +27,23 @@ define void @test(%struct.a* nocapture %dhcp, i16 zeroext %value) #0 {
 entry:
   %shr = lshr i16 %value, 8
   %conv1 = trunc i16 %shr to i8
-  %msg_out = getelementptr inbounds %struct.a, %struct.a* %dhcp, i32 0, i32 7
-  %0 = load %struct.b*, %struct.b** %msg_out, align 4
-  %options_out_len = getelementptr inbounds %struct.a, %struct.a* %dhcp, i32 0, i32 8
-  %1 = load i16, i16* %options_out_len, align 4
+  %msg_out = getelementptr inbounds %struct.a, ptr %dhcp, i32 0, i32 7
+  %0 = load ptr, ptr %msg_out, align 4
+  %options_out_len = getelementptr inbounds %struct.a, ptr %dhcp, i32 0, i32 8
+  %1 = load i16, ptr %options_out_len, align 4
   %inc = add i16 %1, 1
-  store i16 %inc, i16* %options_out_len, align 4
+  store i16 %inc, ptr %options_out_len, align 4
   %idxprom = zext i16 %1 to i32
-  %arrayidx = getelementptr inbounds %struct.b, %struct.b* %0, i32 0, i32 15, i32 %idxprom
-  store i8 %conv1, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds %struct.b, ptr %0, i32 0, i32 15, i32 %idxprom
+  store i8 %conv1, ptr %arrayidx, align 1
   %conv4 = trunc i16 %value to i8
-  %2 = load %struct.b*, %struct.b** %msg_out, align 4
-  %3 = load i16, i16* %options_out_len, align 4
+  %2 = load ptr, ptr %msg_out, align 4
+  %3 = load i16, ptr %options_out_len, align 4
   %inc8 = add i16 %3, 1
-  store i16 %inc8, i16* %options_out_len, align 4
+  store i16 %inc8, ptr %options_out_len, align 4
   %idxprom9 = zext i16 %3 to i32
-  %arrayidx10 = getelementptr inbounds %struct.b, %struct.b* %2, i32 0, i32 15, i32 %idxprom9
-  store i8 %conv4, i8* %arrayidx10, align 1
+  %arrayidx10 = getelementptr inbounds %struct.b, ptr %2, i32 0, i32 15, i32 %idxprom9
+  store i8 %conv4, ptr %arrayidx10, align 1
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Thumb2/machine-licm.ll b/llvm/test/CodeGen/Thumb2/machine-licm.ll
index d2438ac4242ef..5a2ec9280de77 100644
--- a/llvm/test/CodeGen/Thumb2/machine-licm.ll
+++ b/llvm/test/CodeGen/Thumb2/machine-licm.ll
@@ -3,9 +3,9 @@
 ; rdar://7353541
 ; rdar://7354376
 
-@GV = external global i32                         ; <i32*> [#uses=2]
+@GV = external global i32                         ; <ptr> [#uses=2]
 
-define void @t1(i32* nocapture %vals, i32 %c) nounwind {
+define void @t1(ptr nocapture %vals, i32 %c) nounwind {
 entry:
 ; CHECK-LABEL: t1:
 ; CHECK: bxeq lr
@@ -29,16 +29,16 @@ bb.nph:                                           ; preds = %entry
 ; PIC: LBB0_
 ; PIC-NOT: LCPI0_0:
 ; PIC: .section
-  %.pre = load i32, i32* @GV, align 4                  ; <i32> [#uses=1]
+  %.pre = load i32, ptr @GV, align 4                  ; <i32> [#uses=1]
   br label %bb
 
 bb:                                               ; preds = %bb, %bb.nph
   %1 = phi i32 [ %.pre, %bb.nph ], [ %3, %bb ]    ; <i32> [#uses=1]
   %i.03 = phi i32 [ 0, %bb.nph ], [ %4, %bb ]     ; <i32> [#uses=2]
-  %scevgep = getelementptr i32, i32* %vals, i32 %i.03  ; <i32*> [#uses=1]
-  %2 = load i32, i32* %scevgep, align 4                ; <i32> [#uses=1]
+  %scevgep = getelementptr i32, ptr %vals, i32 %i.03  ; <ptr> [#uses=1]
+  %2 = load i32, ptr %scevgep, align 4                ; <i32> [#uses=1]
   %3 = add nsw i32 %1, %2                         ; <i32> [#uses=2]
-  store i32 %3, i32* @GV, align 4
+  store i32 %3, ptr @GV, align 4
   %4 = add i32 %i.03, 1                           ; <i32> [#uses=2]
   %exitcond = icmp eq i32 %4, %c                  ; <i1> [#uses=1]
   br i1 %exitcond, label %return, label %bb
@@ -48,7 +48,7 @@ return:                                           ; preds = %bb, %entry
 }
 
 ; rdar://8001136
-define void @t2(i8* %ptr1, i8* %ptr2) nounwind {
+define void @t2(ptr %ptr1, ptr %ptr2) nounwind {
 entry:
 ; CHECK-LABEL: t2:
 ; CHECK: vmov.f32 q{{.*}}, #1.000000e+00
@@ -58,11 +58,11 @@ bb1:
 ; CHECK: %bb1
   %indvar = phi i32 [ %indvar.next, %bb1 ], [ 0, %entry ]
   %tmp1 = shl i32 %indvar, 2
-  %gep1 = getelementptr i8, i8* %ptr1, i32 %tmp1
-  %tmp2 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* %gep1, i32 1)
+  %gep1 = getelementptr i8, ptr %ptr1, i32 %tmp1
+  %tmp2 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr %gep1, i32 1)
   %tmp3 = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, <4 x float> %tmp2)
-  %gep2 = getelementptr i8, i8* %ptr2, i32 %tmp1
-  call void @llvm.arm.neon.vst1.p0i8.v4f32(i8* %gep2, <4 x float> %tmp3, i32 1)
+  %gep2 = getelementptr i8, ptr %ptr2, i32 %tmp1
+  call void @llvm.arm.neon.vst1.p0.v4f32(ptr %gep2, <4 x float> %tmp3, i32 1)
   %indvar.next = add i32 %indvar, 1
   %cond = icmp eq i32 %indvar.next, 10
   br i1 %cond, label %bb2, label %bb1
@@ -73,9 +73,9 @@ bb2:
 
 ; CHECK-NOT: LCPI1_0:
 
-declare <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8*, i32) nounwind readonly
+declare <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr, i32) nounwind readonly
 
-declare void @llvm.arm.neon.vst1.p0i8.v4f32(i8*, <4 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst1.p0.v4f32(ptr, <4 x float>, i32) nounwind
 
 declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone
 

diff --git a/llvm/test/CodeGen/Thumb2/mve-basic.ll b/llvm/test/CodeGen/Thumb2/mve-basic.ll
index 797295c5b201e..6f81c836ca8fb 100644
--- a/llvm/test/CodeGen/Thumb2/mve-basic.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-basic.ll
@@ -13,7 +13,7 @@ define arm_aapcs_vfpcc <4 x i32> @vector_add_by_value(<4 x i32> %lhs, <4 x i32>%
   ret <4 x i32> %result
 }
 
-define void @vector_add_by_reference(<4 x i32>* %resultp, <4 x i32>* %lhsp, <4 x i32>* %rhsp) {
+define void @vector_add_by_reference(ptr %resultp, ptr %lhsp, ptr %rhsp) {
 ; CHECK-LABEL: vector_add_by_reference:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -23,21 +23,21 @@ define void @vector_add_by_reference(<4 x i32>* %resultp, <4 x i32>* %lhsp, <4 x
 ; CHECK-NEXT:    @NO_APP
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
-  %lhs = load <4 x i32>, <4 x i32>* %lhsp, align 16
-  %rhs = load <4 x i32>, <4 x i32>* %rhsp, align 16
+  %lhs = load <4 x i32>, ptr %lhsp, align 16
+  %rhs = load <4 x i32>, ptr %rhsp, align 16
   %result = tail call <4 x i32> asm "vadd.i32 $0,$1,$2", "=t,t,t"(<4 x i32> %lhs, <4 x i32> %rhs)
-  store <4 x i32> %result, <4 x i32>* %resultp, align 16
+  store <4 x i32> %result, ptr %resultp, align 16
   ret void
 }
 
-define void @vector_f64_copy(<2 x double>* %from, <2 x double>* %to) {
+define void @vector_f64_copy(ptr %from, ptr %to) {
 ; CHECK-LABEL: vector_f64_copy:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
-  %v = load <2 x double>, <2 x double>* %from, align 16
-  store <2 x double> %v, <2 x double>* %to, align 16
+  %v = load <2 x double>, ptr %from, align 16
+  store <2 x double> %v, ptr %to, align 16
   ret void
 }
 
@@ -52,8 +52,8 @@ define arm_aapcs_vfpcc <16 x i8> @stack_slot_handling(<16 x i8> %a) #0 {
 ; CHECK-NEXT:    bx lr
 entry:
   %a.addr = alloca <16 x i8>, align 8
-  store <16 x i8> %a, <16 x i8>* %a.addr, align 8
-  %0 = load <16 x i8>, <16 x i8>* %a.addr, align 8
+  store <16 x i8> %a, ptr %a.addr, align 8
+  %0 = load <16 x i8>, ptr %a.addr, align 8
   ret <16 x i8> %0
 }
 

diff --git a/llvm/test/CodeGen/Thumb2/mve-be.ll b/llvm/test/CodeGen/Thumb2/mve-be.ll
index 0cb72cf8bbd3e..522d6f8704b6a 100644
--- a/llvm/test/CodeGen/Thumb2/mve-be.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-be.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE
 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
 
-define void @load_load_add_store(<4 x i32> *%src1, <4 x i32> *%src2) {
+define void @load_load_add_store(ptr %src1, ptr %src2) {
 ; CHECK-LABEL: load_load_add_store:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -11,14 +11,14 @@ define void @load_load_add_store(<4 x i32> *%src1, <4 x i32> *%src2) {
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <4 x i32>, <4 x i32>* %src1, align 4
-  %l2 = load <4 x i32>, <4 x i32>* %src2, align 4
+  %l1 = load <4 x i32>, ptr %src1, align 4
+  %l2 = load <4 x i32>, ptr %src2, align 4
   %a = add <4 x i32> %l1, %l2
-  store <4 x i32> %a, <4 x i32>* %src1, align 4
+  store <4 x i32> %a, ptr %src1, align 4
   ret void
 }
 
-define void @load_load_add_store_align1(<4 x i32> *%src1, <4 x i32> *%src2) {
+define void @load_load_add_store_align1(ptr %src1, ptr %src2) {
 ; CHECK-LE-LABEL: load_load_add_store_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r1]
@@ -38,14 +38,14 @@ define void @load_load_add_store_align1(<4 x i32> *%src1, <4 x i32> *%src2) {
 ; CHECK-BE-NEXT:    vstrb.8 q0, [r0]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %l1 = load <4 x i32>, <4 x i32>* %src1, align 1
-  %l2 = load <4 x i32>, <4 x i32>* %src2, align 1
+  %l1 = load <4 x i32>, ptr %src1, align 1
+  %l2 = load <4 x i32>, ptr %src2, align 1
   %a = add <4 x i32> %l1, %l2
-  store <4 x i32> %a, <4 x i32>* %src1, align 1
+  store <4 x i32> %a, ptr %src1, align 1
   ret void
 }
 
-define arm_aapcs_vfpcc void @load_arg_add_store(<4 x i32> *%src1, <4 x i32> %src2) {
+define arm_aapcs_vfpcc void @load_arg_add_store(ptr %src1, <4 x i32> %src2) {
 ; CHECK-LE-LABEL: load_arg_add_store:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q1, [r0]
@@ -61,9 +61,9 @@ define arm_aapcs_vfpcc void @load_arg_add_store(<4 x i32> *%src1, <4 x i32> %src
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %l1 = load <4 x i32>, <4 x i32>* %src1, align 4
+  %l1 = load <4 x i32>, ptr %src1, align 4
   %a = add <4 x i32> %l1, %src2
-  store <4 x i32> %a, <4 x i32>* %src1, align 4
+  store <4 x i32> %a, ptr %src1, align 4
   ret void
 }
 
@@ -265,7 +265,7 @@ entry:
 }
 
 ; FIXME: This looks wrong
-define arm_aapcs_vfpcc <4 x i32> @test(i32* %data) {
+define arm_aapcs_vfpcc <4 x i32> @test(ptr %data) {
 ; CHECK-LE-LABEL: test:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r0, #32]
@@ -289,12 +289,11 @@ define arm_aapcs_vfpcc <4 x i32> @test(i32* %data) {
 ; CHECK-BE-NEXT:    vrev64.8 q0, q1
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %add.ptr = getelementptr inbounds i32, i32* %data, i32 8
-  %0 = bitcast i32* %add.ptr to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = add <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
-  %3 = tail call <4 x i32> asm sideeffect "  VMULLB.s32 $0, $1, $1", "=&w,w"(<4 x i32> %2) #2
-  ret <4 x i32> %3
+  %add.ptr = getelementptr inbounds i32, ptr %data, i32 8
+  %0 = load <4 x i32>, ptr %add.ptr, align 4
+  %1 = add <4 x i32> %0, <i32 1, i32 1, i32 1, i32 1>
+  %2 = tail call <4 x i32> asm sideeffect "  VMULLB.s32 $0, $1, $1", "=&w,w"(<4 x i32> %1) #2
+  ret <4 x i32> %2
 }
 
 ; Test case demonstrating that 'bitcast' reinterprets the memory format of a
@@ -302,7 +301,7 @@ entry:
 ; operations treating a register as having different lane sizes, then in
 ; big-endian mode, it has to emit a vrev32.16, which is equivalent to the
 ; effect that vstrw.32 followed by vldrh.16 would have.
-define arm_aapcs_vfpcc void @test_bitcast(<4 x i32>* readonly %in, <8 x i16>* %out) {
+define arm_aapcs_vfpcc void @test_bitcast(ptr readonly %in, ptr %out) {
 ; CHECK-LE-LABEL: test_bitcast:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r0]
@@ -320,18 +319,18 @@ define arm_aapcs_vfpcc void @test_bitcast(<4 x i32>* readonly %in, <8 x i16>* %o
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %vin = load <4 x i32>, <4 x i32>* %in, align 8
+  %vin = load <4 x i32>, ptr %in, align 8
   %vdbl = mul <4 x i32> %vin, %vin
   %cast = bitcast <4 x i32> %vdbl to <8 x i16>
   %cdbl = mul <8 x i16> %cast, %cast
-  store <8 x i16> %cdbl, <8 x i16>* %out, align 8
+  store <8 x i16> %cdbl, ptr %out, align 8
   ret void
 }
 
 ; Similar test case but using the arm.mve.vreinterpretq intrinsic instead,
 ; which is defined to reinterpret the in-register format, so it generates no
 ; instruction in either endianness.
-define arm_aapcs_vfpcc void @test_vreinterpretq(<4 x i32>* readonly %in, <8 x i16>* %out) {
+define arm_aapcs_vfpcc void @test_vreinterpretq(ptr readonly %in, ptr %out) {
 ; CHECK-LE-LABEL: test_vreinterpretq:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r0]
@@ -348,11 +347,11 @@ define arm_aapcs_vfpcc void @test_vreinterpretq(<4 x i32>* readonly %in, <8 x i1
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %vin = load <4 x i32>, <4 x i32>* %in, align 8
+  %vin = load <4 x i32>, ptr %in, align 8
   %vdbl = mul <4 x i32> %vin, %vin
   %cast = call <8 x i16> @llvm.arm.mve.vreinterpretq.v8i16.v4i32(<4 x i32> %vdbl)
   %cdbl = mul <8 x i16> %cast, %cast
-  store <8 x i16> %cdbl, <8 x i16>* %out, align 8
+  store <8 x i16> %cdbl, ptr %out, align 8
   ret void
 }
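
The comments on the two tests above carry the key distinction: bitcast is defined on the in-memory byte layout, so big-endian MVE lowering may need a vrev32.16 to fix up lanes, while llvm.arm.mve.vreinterpretq reinterprets the in-register format and folds away in either endianness. A side-by-side sketch (hypothetical functions, reusing the intrinsic the test already declares):

define arm_aapcs_vfpcc <8 x i16> @via_bitcast(<4 x i32> %v) {
entry:
  ; Memory-layout semantics: may lower to vrev32.16 on big-endian.
  %c = bitcast <4 x i32> %v to <8 x i16>
  ret <8 x i16> %c
}

define arm_aapcs_vfpcc <8 x i16> @via_vreinterpretq(<4 x i32> %v) {
entry:
  ; Register-format semantics: no instruction in either endianness.
  %c = call <8 x i16> @llvm.arm.mve.vreinterpretq.v8i16.v4i32(<4 x i32> %v)
  ret <8 x i16> %c
}

declare <8 x i16> @llvm.arm.mve.vreinterpretq.v8i16.v4i32(<4 x i32>)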
 

diff --git a/llvm/test/CodeGen/Thumb2/mve-blockplacement.ll b/llvm/test/CodeGen/Thumb2/mve-blockplacement.ll
index 82a69876ea03b..879d110c72e3d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-blockplacement.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-blockplacement.ll
@@ -4,7 +4,7 @@
 @var_36 = hidden local_unnamed_addr global i8 0, align 1
 @arr_61 = hidden local_unnamed_addr global [1 x i32] zeroinitializer, align 4
 
-define i32 @test(i8 zeroext %var_2, i16 signext %var_15, [18 x [22 x i8]]* %arr_60) {
+define i32 @test(i8 zeroext %var_2, i16 signext %var_15, ptr %arr_60) {
 ; CHECK-LABEL: test:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -151,11 +151,11 @@ entry:
   br i1 %tobool.not, label %for.cond.cleanup, label %for.cond1.preheader
 
 for.cond1.preheader:                              ; preds = %entry
-  %cmp11.not = icmp eq [18 x [22 x i8]]* %arr_60, null
+  %cmp11.not = icmp eq ptr %arr_60, null
   br i1 %cmp11.not, label %for.cond1.us.preheader, label %for.cond1
 
 for.cond1.us.preheader:                           ; preds = %for.cond1.preheader
-  store i32 0, i32* getelementptr ([1 x i32], [1 x i32]* @arr_61, i32 21, i32 0), align 4
+  store i32 0, ptr getelementptr ([1 x i32], ptr @arr_61, i32 21, i32 0), align 4
   call void @llvm.trap()
   unreachable
 
@@ -176,12 +176,12 @@ for.cond.cleanup9:                                ; preds = %cond.end22.1
 
 for.body10:                                       ; preds = %cond.end22.1, %for.cond6.preheader
   %i_15.044 = phi i32 [ 0, %for.cond6.preheader ], [ %add.1, %cond.end22.1 ]
-  %arraydecay = getelementptr inbounds [18 x [22 x i8]], [18 x [22 x i8]]* %arr_60, i32 %i_15.044, i32 0
-  %0 = ptrtoint [22 x i8]* %arraydecay to i32
-  %arrayidx13 = getelementptr inbounds [1 x i32], [1 x i32]* @arr_61, i32 0, i32 %i_15.044
-  store i32 %0, i32* %arrayidx13, align 4
-  %arrayidx16 = getelementptr inbounds [18 x [22 x i8]], [18 x [22 x i8]]* %arr_60, i32 1, i32 %conv45, i32 %i_15.044
-  %1 = load i8, i8* %arrayidx16, align 1
+  %arraydecay = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 %i_15.044, i32 0
+  %0 = ptrtoint ptr %arraydecay to i32
+  %arrayidx13 = getelementptr inbounds [1 x i32], ptr @arr_61, i32 0, i32 %i_15.044
+  store i32 %0, ptr %arrayidx13, align 4
+  %arrayidx16 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 1, i32 %conv45, i32 %i_15.044
+  %1 = load i8, ptr %arrayidx16, align 1
   %tobool18.not = icmp eq i8 %1, 0
   br i1 %tobool18.not, label %cond.end22, label %cond.true19
 
@@ -194,14 +194,14 @@ cond.end22:                                       ; preds = %for.body10, %cond.t
   %cond23 = phi i32 [ %div.sext, %cond.true19 ], [ 0, %for.body10 ]
   %tobool24 = icmp ne i32 %cond23, 0
   %frombool = zext i1 %tobool24 to i8
-  store i8 %frombool, i8* @var_36, align 1
+  store i8 %frombool, ptr @var_36, align 1
   %add = or i32 %i_15.044, 1
-  %arraydecay.1 = getelementptr inbounds [18 x [22 x i8]], [18 x [22 x i8]]* %arr_60, i32 %add, i32 0
-  %2 = ptrtoint [22 x i8]* %arraydecay.1 to i32
-  %arrayidx13.1 = getelementptr inbounds [1 x i32], [1 x i32]* @arr_61, i32 0, i32 %add
-  store i32 %2, i32* %arrayidx13.1, align 4
-  %arrayidx16.1 = getelementptr inbounds [18 x [22 x i8]], [18 x [22 x i8]]* %arr_60, i32 1, i32 %conv45, i32 %add
-  %3 = load i8, i8* %arrayidx16.1, align 1
+  %arraydecay.1 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 %add, i32 0
+  %2 = ptrtoint ptr %arraydecay.1 to i32
+  %arrayidx13.1 = getelementptr inbounds [1 x i32], ptr @arr_61, i32 0, i32 %add
+  store i32 %2, ptr %arrayidx13.1, align 4
+  %arrayidx16.1 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 1, i32 %conv45, i32 %add
+  %3 = load i8, ptr %arrayidx16.1, align 1
   %tobool18.not.1 = icmp eq i8 %3, 0
   br i1 %tobool18.not.1, label %cond.end22.1, label %cond.true19.1
 
@@ -214,19 +214,19 @@ cond.end22.1:                                     ; preds = %cond.true19.1, %con
   %cond23.1 = phi i32 [ %div.sext.1, %cond.true19.1 ], [ 0, %cond.end22 ]
   %tobool24.1 = icmp ne i32 %cond23.1, 0
   %frombool.1 = zext i1 %tobool24.1 to i8
-  store i8 %frombool.1, i8* @var_36, align 1
+  store i8 %frombool.1, ptr @var_36, align 1
   %add.1 = add nuw nsw i32 %i_15.044, 2
   %exitcond105.not.1 = icmp eq i32 %add.1, 22
   br i1 %exitcond105.not.1, label %for.cond.cleanup9, label %for.body10
 
 for.body10.1:                                     ; preds = %cond.end22.1.1, %for.cond.cleanup9
   %i_15.044.1 = phi i32 [ 0, %for.cond.cleanup9 ], [ %add.1.1, %cond.end22.1.1 ]
-  %arraydecay.1108 = getelementptr inbounds [18 x [22 x i8]], [18 x [22 x i8]]* %arr_60, i32 %i_15.044.1, i32 0
-  %4 = ptrtoint [22 x i8]* %arraydecay.1108 to i32
-  %arrayidx13.1109 = getelementptr inbounds [1 x i32], [1 x i32]* @arr_61, i32 0, i32 %i_15.044.1
-  store i32 %4, i32* %arrayidx13.1109, align 4
-  %arrayidx16.1110 = getelementptr inbounds [18 x [22 x i8]], [18 x [22 x i8]]* %arr_60, i32 1, i32 %conv, i32 %i_15.044.1
-  %5 = load i8, i8* %arrayidx16.1110, align 1
+  %arraydecay.1108 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 %i_15.044.1, i32 0
+  %4 = ptrtoint ptr %arraydecay.1108 to i32
+  %arrayidx13.1109 = getelementptr inbounds [1 x i32], ptr @arr_61, i32 0, i32 %i_15.044.1
+  store i32 %4, ptr %arrayidx13.1109, align 4
+  %arrayidx16.1110 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 1, i32 %conv, i32 %i_15.044.1
+  %5 = load i8, ptr %arrayidx16.1110, align 1
   %tobool18.not.1111 = icmp eq i8 %5, 0
   br i1 %tobool18.not.1111, label %cond.end22.1119, label %cond.true19.1114
 
@@ -239,14 +239,14 @@ cond.end22.1119:                                  ; preds = %cond.true19.1114, %
   %cond23.1115 = phi i32 [ %div.sext.1113, %cond.true19.1114 ], [ 0, %for.body10.1 ]
   %tobool24.1116 = icmp ne i32 %cond23.1115, 0
   %frombool.1117 = zext i1 %tobool24.1116 to i8
-  store i8 %frombool.1117, i8* @var_36, align 1
+  store i8 %frombool.1117, ptr @var_36, align 1
   %add.1118 = or i32 %i_15.044.1, 1
-  %arraydecay.1.1 = getelementptr inbounds [18 x [22 x i8]], [18 x [22 x i8]]* %arr_60, i32 %add.1118, i32 0
-  %6 = ptrtoint [22 x i8]* %arraydecay.1.1 to i32
-  %arrayidx13.1.1 = getelementptr inbounds [1 x i32], [1 x i32]* @arr_61, i32 0, i32 %add.1118
-  store i32 %6, i32* %arrayidx13.1.1, align 4
-  %arrayidx16.1.1 = getelementptr inbounds [18 x [22 x i8]], [18 x [22 x i8]]* %arr_60, i32 1, i32 %conv, i32 %add.1118
-  %7 = load i8, i8* %arrayidx16.1.1, align 1
+  %arraydecay.1.1 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 %add.1118, i32 0
+  %6 = ptrtoint ptr %arraydecay.1.1 to i32
+  %arrayidx13.1.1 = getelementptr inbounds [1 x i32], ptr @arr_61, i32 0, i32 %add.1118
+  store i32 %6, ptr %arrayidx13.1.1, align 4
+  %arrayidx16.1.1 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 1, i32 %conv, i32 %add.1118
+  %7 = load i8, ptr %arrayidx16.1.1, align 1
   %tobool18.not.1.1 = icmp eq i8 %7, 0
   br i1 %tobool18.not.1.1, label %cond.end22.1.1, label %cond.true19.1.1
 
@@ -259,7 +259,7 @@ cond.end22.1.1:                                   ; preds = %cond.true19.1.1, %c
   %cond23.1.1 = phi i32 [ %div.sext.1.1, %cond.true19.1.1 ], [ 0, %cond.end22.1119 ]
   %tobool24.1.1 = icmp ne i32 %cond23.1.1, 0
   %frombool.1.1 = zext i1 %tobool24.1.1 to i8
-  store i8 %frombool.1.1, i8* @var_36, align 1
+  store i8 %frombool.1.1, ptr @var_36, align 1
   %add.1.1 = add nuw nsw i32 %i_15.044.1, 2
   %exitcond105.not.1.1 = icmp eq i32 %add.1.1, 22
   br i1 %exitcond105.not.1.1, label %for.cond.cleanup9.1, label %for.body10.1
@@ -271,12 +271,12 @@ for.cond.cleanup9.1:                              ; preds = %cond.end22.1.1
 
 for.body10.2:                                     ; preds = %cond.end22.1.2, %for.cond.cleanup9.1
   %i_15.044.2 = phi i32 [ 0, %for.cond.cleanup9.1 ], [ %add.1.2, %cond.end22.1.2 ]
-  %arraydecay.2 = getelementptr inbounds [18 x [22 x i8]], [18 x [22 x i8]]* %arr_60, i32 %i_15.044.2, i32 0
-  %8 = ptrtoint [22 x i8]* %arraydecay.2 to i32
-  %arrayidx13.2 = getelementptr inbounds [1 x i32], [1 x i32]* @arr_61, i32 0, i32 %i_15.044.2
-  store i32 %8, i32* %arrayidx13.2, align 4
-  %arrayidx16.2 = getelementptr inbounds [18 x [22 x i8]], [18 x [22 x i8]]* %arr_60, i32 1, i32 %conv.1, i32 %i_15.044.2
-  %9 = load i8, i8* %arrayidx16.2, align 1
+  %arraydecay.2 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 %i_15.044.2, i32 0
+  %8 = ptrtoint ptr %arraydecay.2 to i32
+  %arrayidx13.2 = getelementptr inbounds [1 x i32], ptr @arr_61, i32 0, i32 %i_15.044.2
+  store i32 %8, ptr %arrayidx13.2, align 4
+  %arrayidx16.2 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 1, i32 %conv.1, i32 %i_15.044.2
+  %9 = load i8, ptr %arrayidx16.2, align 1
   %tobool18.not.2 = icmp eq i8 %9, 0
   br i1 %tobool18.not.2, label %cond.end22.2, label %cond.true19.2
 
@@ -289,14 +289,14 @@ cond.end22.2:                                     ; preds = %cond.true19.2, %for
   %cond23.2 = phi i32 [ %div.sext.2, %cond.true19.2 ], [ 0, %for.body10.2 ]
   %tobool24.2 = icmp ne i32 %cond23.2, 0
   %frombool.2 = zext i1 %tobool24.2 to i8
-  store i8 %frombool.2, i8* @var_36, align 1
+  store i8 %frombool.2, ptr @var_36, align 1
   %add.2 = or i32 %i_15.044.2, 1
-  %arraydecay.1.2 = getelementptr inbounds [18 x [22 x i8]], [18 x [22 x i8]]* %arr_60, i32 %add.2, i32 0
-  %10 = ptrtoint [22 x i8]* %arraydecay.1.2 to i32
-  %arrayidx13.1.2 = getelementptr inbounds [1 x i32], [1 x i32]* @arr_61, i32 0, i32 %add.2
-  store i32 %10, i32* %arrayidx13.1.2, align 4
-  %arrayidx16.1.2 = getelementptr inbounds [18 x [22 x i8]], [18 x [22 x i8]]* %arr_60, i32 1, i32 %conv.1, i32 %add.2
-  %11 = load i8, i8* %arrayidx16.1.2, align 1
+  %arraydecay.1.2 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 %add.2, i32 0
+  %10 = ptrtoint ptr %arraydecay.1.2 to i32
+  %arrayidx13.1.2 = getelementptr inbounds [1 x i32], ptr @arr_61, i32 0, i32 %add.2
+  store i32 %10, ptr %arrayidx13.1.2, align 4
+  %arrayidx16.1.2 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 1, i32 %conv.1, i32 %add.2
+  %11 = load i8, ptr %arrayidx16.1.2, align 1
   %tobool18.not.1.2 = icmp eq i8 %11, 0
   br i1 %tobool18.not.1.2, label %cond.end22.1.2, label %cond.true19.1.2
 
@@ -309,7 +309,7 @@ cond.end22.1.2:                                   ; preds = %cond.true19.1.2, %c
   %cond23.1.2 = phi i32 [ %div.sext.1.2, %cond.true19.1.2 ], [ 0, %cond.end22.2 ]
   %tobool24.1.2 = icmp ne i32 %cond23.1.2, 0
   %frombool.1.2 = zext i1 %tobool24.1.2 to i8
-  store i8 %frombool.1.2, i8* @var_36, align 1
+  store i8 %frombool.1.2, ptr @var_36, align 1
   %add.1.2 = add nuw nsw i32 %i_15.044.2, 2
   %exitcond105.not.1.2 = icmp eq i32 %add.1.2, 22
   br i1 %exitcond105.not.1.2, label %for.cond.cleanup9.2, label %for.body10.2
@@ -630,24 +630,24 @@ vector.body:                                      ; preds = %for.body6.us, %vect
   %index = phi i32 [ %index.next, %vector.body ], [ 0, %for.body6.us ]
   %vec.ind = phi <4 x i32> [ %vec.ind.next, %vector.body ], [ %induction, %for.body6.us ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %4)
-  %10 = getelementptr inbounds [1 x i32], [1 x i32]* @c, i32 0, <4 x i32> %vec.ind
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %broadcast.splat108, <4 x i32*> %10, i32 4, <4 x i1> %active.lane.mask)
+  %10 = getelementptr inbounds [1 x i32], ptr @c, i32 0, <4 x i32> %vec.ind
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %broadcast.splat108, <4 x ptr> %10, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
   %vec.ind.next = add <4 x i32> %vec.ind, <i32 12, i32 12, i32 12, i32 12>
   %11 = icmp eq i32 %index.next, %n.vec
   br i1 %11, label %for.cond9.for.cond15.preheader_crit_edge.us, label %vector.body
 
 for.body13.us51.preheader:                        ; preds = %for.body6.us
-  store i32 0, i32* @b, align 4
-  store i32 0, i32* @a, align 4
+  store i32 0, ptr @b, align 4
+  store i32 0, ptr @a, align 4
   br label %vector.body111
 
 vector.body111:                                   ; preds = %vector.body111, %for.body13.us51.preheader
   %index117 = phi i32 [ 0, %for.body13.us51.preheader ], [ %index.next118, %vector.body111 ]
   %vec.ind124 = phi <4 x i32> [ %induction123, %for.body13.us51.preheader ], [ %vec.ind.next125, %vector.body111 ]
   %active.lane.mask130 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index117, i32 %9)
-  %12 = getelementptr inbounds [1 x i32], [1 x i32]* @c, i32 0, <4 x i32> %vec.ind124
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %broadcast.splat132, <4 x i32*> %12, i32 4, <4 x i1> %active.lane.mask130)
+  %12 = getelementptr inbounds [1 x i32], ptr @c, i32 0, <4 x i32> %vec.ind124
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %broadcast.splat132, <4 x ptr> %12, i32 4, <4 x i1> %active.lane.mask130)
   %index.next118 = add i32 %index117, 4
   %vec.ind.next125 = add <4 x i32> %vec.ind124, <i32 12, i32 12, i32 12, i32 12>
   %13 = icmp eq i32 %index.next118, %n.vec116
@@ -730,4 +730,4 @@ for.cond.cleanup17.us63.3:                        ; preds = %for.cond.cleanup17.
 }
 
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32) #1
-declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32 immarg, <4 x i1>) #2
+declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32 immarg, <4 x i1>) #2

diff  --git a/llvm/test/CodeGen/Thumb2/mve-extractstore.ll b/llvm/test/CodeGen/Thumb2/mve-extractstore.ll
index 1c2c031158da6..941ae78cc9a79 100644
--- a/llvm/test/CodeGen/Thumb2/mve-extractstore.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-extractstore.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
 
-define half @extret1_f16_sf(<8 x half> %a, <8 x half> %b, half* nocapture %p) {
+define half @extret1_f16_sf(<8 x half> %a, <8 x half> %b, ptr nocapture %p) {
 ; CHECK-LABEL: extret1_f16_sf:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmov d0, r0, r1
@@ -15,11 +15,11 @@ define half @extret1_f16_sf(<8 x half> %a, <8 x half> %b, half* nocapture %p) {
 ; CHECK-NEXT:    bx lr
   %c = fadd <8 x half> %a, %b
   %e = extractelement <8 x half> %c, i32 1
-  store half %e, half* %p, align 2
+  store half %e, ptr %p, align 2
   ret half %e
 }
 
-define half @extret4_f16_sf(<8 x half> %a, <8 x half> %b, half* nocapture %p) {
+define half @extret4_f16_sf(<8 x half> %a, <8 x half> %b, ptr nocapture %p) {
 ; CHECK-LABEL: extret4_f16_sf:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    mov r0, sp
@@ -32,11 +32,11 @@ define half @extret4_f16_sf(<8 x half> %a, <8 x half> %b, half* nocapture %p) {
 ; CHECK-NEXT:    bx lr
   %c = fadd <8 x half> %a, %b
   %e = extractelement <8 x half> %c, i32 4
-  store half %e, half* %p, align 2
+  store half %e, ptr %p, align 2
   ret half %e
 }
 
-define arm_aapcs_vfpcc half @extret1_f16_hf(<8 x half> %a, <8 x half> %b, half* nocapture %p) {
+define arm_aapcs_vfpcc half @extret1_f16_hf(<8 x half> %a, <8 x half> %b, ptr nocapture %p) {
 ; CHECK-LABEL: extret1_f16_hf:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vadd.f16 q0, q0, q1
@@ -45,11 +45,11 @@ define arm_aapcs_vfpcc half @extret1_f16_hf(<8 x half> %a, <8 x half> %b, half*
 ; CHECK-NEXT:    bx lr
   %c = fadd <8 x half> %a, %b
   %e = extractelement <8 x half> %c, i32 1
-  store half %e, half* %p, align 2
+  store half %e, ptr %p, align 2
   ret half %e
 }
 
-define arm_aapcs_vfpcc half @extret4_f16_hf(<8 x half> %a, <8 x half> %b, half* nocapture %p) {
+define arm_aapcs_vfpcc half @extret4_f16_hf(<8 x half> %a, <8 x half> %b, ptr nocapture %p) {
 ; CHECK-LABEL: extret4_f16_hf:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vadd.f16 q0, q0, q1
@@ -58,11 +58,11 @@ define arm_aapcs_vfpcc half @extret4_f16_hf(<8 x half> %a, <8 x half> %b, half*
 ; CHECK-NEXT:    bx lr
   %c = fadd <8 x half> %a, %b
   %e = extractelement <8 x half> %c, i32 4
-  store half %e, half* %p, align 2
+  store half %e, ptr %p, align 2
   ret half %e
 }
 
-define arm_aapcs_vfpcc <8 x half> @extret1_v8f16_hf(<8 x half> %a, <8 x half> %b, half* nocapture %p) {
+define arm_aapcs_vfpcc <8 x half> @extret1_v8f16_hf(<8 x half> %a, <8 x half> %b, ptr nocapture %p) {
 ; CHECK-LABEL: extret1_v8f16_hf:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vadd.f16 q0, q0, q1
@@ -72,13 +72,13 @@ define arm_aapcs_vfpcc <8 x half> @extret1_v8f16_hf(<8 x half> %a, <8 x half> %b
 ; CHECK-NEXT:    bx lr
   %c = fadd <8 x half> %a, %b
   %e = extractelement <8 x half> %c, i32 1
-  store half %e, half* %p, align 2
+  store half %e, ptr %p, align 2
   %i = insertelement <8 x half> undef, half %e, i32 0
   %s = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
   ret <8 x half> %s
 }
 
-define arm_aapcs_vfpcc <8 x half> @extret4_v8f16_hf(<8 x half> %a, <8 x half> %b, half* nocapture %p) {
+define arm_aapcs_vfpcc <8 x half> @extret4_v8f16_hf(<8 x half> %a, <8 x half> %b, ptr nocapture %p) {
 ; CHECK-LABEL: extret4_v8f16_hf:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vadd.f16 q0, q0, q1
@@ -88,14 +88,14 @@ define arm_aapcs_vfpcc <8 x half> @extret4_v8f16_hf(<8 x half> %a, <8 x half> %b
 ; CHECK-NEXT:    bx lr
   %c = fadd <8 x half> %a, %b
   %e = extractelement <8 x half> %c, i32 4
-  store half %e, half* %p, align 2
+  store half %e, ptr %p, align 2
   %i = insertelement <8 x half> undef, half %e, i32 0
   %s = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
   ret <8 x half> %s
 }
 
 
-define float @extret1_f32_sf(<4 x float> %a, <4 x float> %b, float* nocapture %p) {
+define float @extret1_f32_sf(<4 x float> %a, <4 x float> %b, ptr nocapture %p) {
 ; CHECK-LABEL: extret1_f32_sf:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmov d0, r0, r1
@@ -108,11 +108,11 @@ define float @extret1_f32_sf(<4 x float> %a, <4 x float> %b, float* nocapture %p
 ; CHECK-NEXT:    bx lr
   %c = fadd <4 x float> %a, %b
   %e = extractelement <4 x float> %c, i32 1
-  store float %e, float* %p, align 4
+  store float %e, ptr %p, align 4
   ret float %e
 }
 
-define float @extret2_f32_sf(<4 x float> %a, <4 x float> %b, float* nocapture %p) {
+define float @extret2_f32_sf(<4 x float> %a, <4 x float> %b, ptr nocapture %p) {
 ; CHECK-LABEL: extret2_f32_sf:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    mov r0, sp
@@ -125,11 +125,11 @@ define float @extret2_f32_sf(<4 x float> %a, <4 x float> %b, float* nocapture %p
 ; CHECK-NEXT:    bx lr
   %c = fadd <4 x float> %a, %b
   %e = extractelement <4 x float> %c, i32 2
-  store float %e, float* %p, align 4
+  store float %e, ptr %p, align 4
   ret float %e
 }
 
-define arm_aapcs_vfpcc float @extret1_f32_hf(<4 x float> %a, <4 x float> %b, float* nocapture %p) {
+define arm_aapcs_vfpcc float @extret1_f32_hf(<4 x float> %a, <4 x float> %b, ptr nocapture %p) {
 ; CHECK-LABEL: extret1_f32_hf:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vadd.f32 q0, q0, q1
@@ -138,12 +138,12 @@ define arm_aapcs_vfpcc float @extret1_f32_hf(<4 x float> %a, <4 x float> %b, flo
 ; CHECK-NEXT:    bx lr
   %c = fadd <4 x float> %a, %b
   %e = extractelement <4 x float> %c, i32 1
-  store float %e, float* %p, align 4
+  store float %e, ptr %p, align 4
   ret float %e
 }
 
 
-define arm_aapcs_vfpcc float @extret2_f32_hf(<4 x float> %a, <4 x float> %b, float* nocapture %p) {
+define arm_aapcs_vfpcc float @extret2_f32_hf(<4 x float> %a, <4 x float> %b, ptr nocapture %p) {
 ; CHECK-LABEL: extret2_f32_hf:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vadd.f32 q0, q0, q1
@@ -152,11 +152,11 @@ define arm_aapcs_vfpcc float @extret2_f32_hf(<4 x float> %a, <4 x float> %b, flo
 ; CHECK-NEXT:    bx lr
   %c = fadd <4 x float> %a, %b
   %e = extractelement <4 x float> %c, i32 2
-  store float %e, float* %p, align 4
+  store float %e, ptr %p, align 4
   ret float %e
 }
 
-define arm_aapcs_vfpcc <4 x float> @extret1_v4f32_hf(<4 x float> %a, <4 x float> %b, float* nocapture %p) {
+define arm_aapcs_vfpcc <4 x float> @extret1_v4f32_hf(<4 x float> %a, <4 x float> %b, ptr nocapture %p) {
 ; CHECK-LABEL: extret1_v4f32_hf:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vadd.f32 q1, q0, q1
@@ -166,13 +166,13 @@ define arm_aapcs_vfpcc <4 x float> @extret1_v4f32_hf(<4 x float> %a, <4 x float>
 ; CHECK-NEXT:    bx lr
   %c = fadd <4 x float> %a, %b
   %e = extractelement <4 x float> %c, i32 1
-  store float %e, float* %p, align 4
+  store float %e, ptr %p, align 4
   %i = insertelement <4 x float> undef, float %e, i32 0
   %s = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %s
 }
 
-define arm_aapcs_vfpcc <4 x float> @extret2_v4f32_hf(<4 x float> %a, <4 x float> %b, float* nocapture %p) {
+define arm_aapcs_vfpcc <4 x float> @extret2_v4f32_hf(<4 x float> %a, <4 x float> %b, ptr nocapture %p) {
 ; CHECK-LABEL: extret2_v4f32_hf:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vadd.f32 q1, q0, q1
@@ -182,7 +182,7 @@ define arm_aapcs_vfpcc <4 x float> @extret2_v4f32_hf(<4 x float> %a, <4 x float>
 ; CHECK-NEXT:    bx lr
   %c = fadd <4 x float> %a, %b
   %e = extractelement <4 x float> %c, i32 2
-  store float %e, float* %p, align 4
+  store float %e, ptr %p, align 4
   %i = insertelement <4 x float> undef, float %e, i32 0
   %s = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %s

diff  --git a/llvm/test/CodeGen/Thumb2/mve-fma-loops.ll b/llvm/test/CodeGen/Thumb2/mve-fma-loops.ll
index a34a278103a02..71cc744ae8ba8 100644
--- a/llvm/test/CodeGen/Thumb2/mve-fma-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-fma-loops.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs -tail-predication=enabled %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc void @fmas1(float* nocapture readonly %x, float* nocapture readonly %y, float* noalias nocapture %z, float %a, i32 %n) {
+define arm_aapcs_vfpcc void @fmas1(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
 ; CHECK-LABEL: fmas1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -34,26 +34,23 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %2 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> undef)
-  %3 = getelementptr inbounds float, float* %y, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %1, <4 x float> undef)
-  %5 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %wide.masked.load12, <4 x float> %broadcast.splat14)
-  %6 = getelementptr inbounds float, float* %z, i32 %index
-  %7 = bitcast float* %6 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %5, <4 x float>* %7, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+  %2 = getelementptr inbounds float, ptr %y, i32 %index
+  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+  %3 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %wide.masked.load12, <4 x float> %broadcast.splat14)
+  %4 = getelementptr inbounds float, ptr %z, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %4, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %for.cond.cleanup, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @fmas2(float* nocapture readonly %x, float* nocapture readonly %y, float* noalias nocapture %z, float %a, i32 %n) {
+define arm_aapcs_vfpcc void @fmas2(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
 ; CHECK-LABEL: fmas2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -86,27 +83,24 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %2 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> undef)
-  %3 = getelementptr inbounds float, float* %y, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %1, <4 x float> undef)
-  %5 = fmul fast <4 x float> %wide.masked.load12, %wide.masked.load
-  %6 = fadd fast <4 x float> %5, %broadcast.splat14
-  %7 = getelementptr inbounds float, float* %z, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %6, <4 x float>* %8, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+  %2 = getelementptr inbounds float, ptr %y, i32 %index
+  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+  %3 = fmul fast <4 x float> %wide.masked.load12, %wide.masked.load
+  %4 = fadd fast <4 x float> %3, %broadcast.splat14
+  %5 = getelementptr inbounds float, ptr %z, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %5, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n.vec
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @fma1(float* nocapture readonly %x, float* nocapture readonly %y, float* noalias nocapture %z, float %a, i32 %n) {
+define arm_aapcs_vfpcc void @fma1(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
 ; CHECK-LABEL: fma1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -139,26 +133,23 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %2 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> undef)
-  %3 = getelementptr inbounds float, float* %y, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %1, <4 x float> undef)
-  %5 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %broadcast.splat14, <4 x float> %wide.masked.load12)
-  %6 = getelementptr inbounds float, float* %z, i32 %index
-  %7 = bitcast float* %6 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %5, <4 x float>* %7, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+  %2 = getelementptr inbounds float, ptr %y, i32 %index
+  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+  %3 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %broadcast.splat14, <4 x float> %wide.masked.load12)
+  %4 = getelementptr inbounds float, ptr %z, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %4, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %for.cond.cleanup, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @fma2(float* nocapture readonly %x, float* nocapture readonly %y, float* noalias nocapture %z, float %a, i32 %n) {
+define arm_aapcs_vfpcc void @fma2(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
 ; CHECK-LABEL: fma2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -191,27 +182,24 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %2 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> undef)
-  %3 = fmul fast <4 x float> %wide.masked.load, %broadcast.splat13
-  %4 = getelementptr inbounds float, float* %y, i32 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  %wide.masked.load14 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %5, i32 4, <4 x i1> %1, <4 x float> undef)
-  %6 = fadd fast <4 x float> %3, %wide.masked.load14
-  %7 = getelementptr inbounds float, float* %z, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %6, <4 x float>* %8, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+  %2 = fmul fast <4 x float> %wide.masked.load, %broadcast.splat13
+  %3 = getelementptr inbounds float, ptr %y, i32 %index
+  %wide.masked.load14 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %3, i32 4, <4 x i1> %1, <4 x float> undef)
+  %4 = fadd fast <4 x float> %2, %wide.masked.load14
+  %5 = getelementptr inbounds float, ptr %z, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %5, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n.vec
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @fmss1(float* nocapture readonly %x, float* nocapture readonly %y, float* noalias nocapture %z, float %a, i32 %n) {
+define arm_aapcs_vfpcc void @fmss1(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
 ; CHECK-LABEL: fmss1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -246,26 +234,23 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %2 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> undef)
-  %3 = getelementptr inbounds float, float* %y, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %1, <4 x float> undef)
-  %5 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %wide.masked.load12, <4 x float> %broadcast.splat14)
-  %6 = getelementptr inbounds float, float* %z, i32 %index
-  %7 = bitcast float* %6 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %5, <4 x float>* %7, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+  %2 = getelementptr inbounds float, ptr %y, i32 %index
+  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+  %3 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %wide.masked.load12, <4 x float> %broadcast.splat14)
+  %4 = getelementptr inbounds float, ptr %z, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %4, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %for.cond.cleanup, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @fmss2(float* nocapture readonly %x, float* nocapture readonly %y, float* noalias nocapture %z, float %a, i32 %n) {
+define arm_aapcs_vfpcc void @fmss2(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
 ; CHECK-LABEL: fmss2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -301,27 +286,24 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %2 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> undef)
-  %3 = getelementptr inbounds float, float* %y, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %1, <4 x float> undef)
-  %5 = fmul fast <4 x float> %wide.masked.load12, %wide.masked.load
-  %6 = fsub fast <4 x float> %5, %broadcast.splat14
-  %7 = getelementptr inbounds float, float* %z, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %6, <4 x float>* %8, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+  %2 = getelementptr inbounds float, ptr %y, i32 %index
+  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+  %3 = fmul fast <4 x float> %wide.masked.load12, %wide.masked.load
+  %4 = fsub fast <4 x float> %3, %broadcast.splat14
+  %5 = getelementptr inbounds float, ptr %z, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %5, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n.vec
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @fmss3(float* nocapture readonly %x, float* nocapture readonly %y, float* noalias nocapture %z, float %a, i32 %n) {
+define arm_aapcs_vfpcc void @fmss3(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
 ; CHECK-LABEL: fmss3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -356,27 +338,24 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %2 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> undef)
-  %3 = getelementptr inbounds float, float* %y, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %1, <4 x float> undef)
-  %5 = fneg fast <4 x float> %wide.masked.load12
-  %6 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %5, <4 x float> %broadcast.splat14)
-  %7 = getelementptr inbounds float, float* %z, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %6, <4 x float>* %8, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+  %2 = getelementptr inbounds float, ptr %y, i32 %index
+  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+  %3 = fneg fast <4 x float> %wide.masked.load12
+  %4 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %3, <4 x float> %broadcast.splat14)
+  %5 = getelementptr inbounds float, ptr %z, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %5, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n.vec
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @fmss4(float* nocapture readonly %x, float* nocapture readonly %y, float* noalias nocapture %z, float %a, i32 %n) {
+define arm_aapcs_vfpcc void @fmss4(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
 ; CHECK-LABEL: fmss4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -411,27 +390,24 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %2 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> undef)
-  %3 = getelementptr inbounds float, float* %y, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %1, <4 x float> undef)
-  %5 = fmul fast <4 x float> %wide.masked.load12, %wide.masked.load
-  %6 = fsub fast <4 x float> %broadcast.splat14, %5
-  %7 = getelementptr inbounds float, float* %z, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %6, <4 x float>* %8, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+  %2 = getelementptr inbounds float, ptr %y, i32 %index
+  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+  %3 = fmul fast <4 x float> %wide.masked.load12, %wide.masked.load
+  %4 = fsub fast <4 x float> %broadcast.splat14, %3
+  %5 = getelementptr inbounds float, ptr %z, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %5, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n.vec
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @fms1(float* nocapture readonly %x, float* nocapture readonly %y, float* noalias nocapture %z, float %a, i32 %n) {
+define arm_aapcs_vfpcc void @fms1(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
 ; CHECK-LABEL: fms1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -466,26 +442,23 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %2 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> undef)
-  %3 = getelementptr inbounds float, float* %y, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %1, <4 x float> undef)
-  %5 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %broadcast.splat14, <4 x float> %wide.masked.load12)
-  %6 = getelementptr inbounds float, float* %z, i32 %index
-  %7 = bitcast float* %6 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %5, <4 x float>* %7, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+  %2 = getelementptr inbounds float, ptr %y, i32 %index
+  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+  %3 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %broadcast.splat14, <4 x float> %wide.masked.load12)
+  %4 = getelementptr inbounds float, ptr %z, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %4, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %for.cond.cleanup, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @fms2(float* nocapture readonly %x, float* nocapture readonly %y, float* noalias nocapture %z, float %a, i32 %n) {
+define arm_aapcs_vfpcc void @fms2(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
 ; CHECK-LABEL: fms2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -519,27 +492,24 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %2 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> undef)
-  %3 = getelementptr inbounds float, float* %y, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %1, <4 x float> undef)
-  %5 = fmul fast <4 x float> %wide.masked.load, %broadcast.splat14
-  %6 = fsub fast <4 x float> %wide.masked.load12, %5
-  %7 = getelementptr inbounds float, float* %z, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %6, <4 x float>* %8, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+  %2 = getelementptr inbounds float, ptr %y, i32 %index
+  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+  %3 = fmul fast <4 x float> %wide.masked.load, %broadcast.splat14
+  %4 = fsub fast <4 x float> %wide.masked.load12, %3
+  %5 = getelementptr inbounds float, ptr %z, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %5, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n.vec
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @fms3(float* nocapture readonly %x, float* nocapture readonly %y, float* noalias nocapture %z, float %a, i32 %n) {
+define arm_aapcs_vfpcc void @fms3(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
 ; CHECK-LABEL: fms3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -573,27 +543,24 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %2 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> undef)
-  %3 = getelementptr inbounds float, float* %y, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %1, <4 x float> undef)
-  %5 = fneg fast <4 x float> %wide.masked.load12
-  %6 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %broadcast.splat14, <4 x float> %5)
-  %7 = getelementptr inbounds float, float* %z, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %6, <4 x float>* %8, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+  %2 = getelementptr inbounds float, ptr %y, i32 %index
+  %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+  %3 = fneg fast <4 x float> %wide.masked.load12
+  %4 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %broadcast.splat14, <4 x float> %3)
+  %5 = getelementptr inbounds float, ptr %z, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %5, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n.vec
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @fms4(float* nocapture readonly %x, float* nocapture readonly %y, float* noalias nocapture %z, float %a, i32 %n) {
+define arm_aapcs_vfpcc void @fms4(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
 ; CHECK-LABEL: fms4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -627,27 +594,24 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
   %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %2 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> undef)
-  %3 = fmul fast <4 x float> %wide.masked.load, %broadcast.splat13
-  %4 = getelementptr inbounds float, float* %y, i32 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  %wide.masked.load14 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %5, i32 4, <4 x i1> %1, <4 x float> undef)
-  %6 = fsub fast <4 x float> %3, %wide.masked.load14
-  %7 = getelementptr inbounds float, float* %z, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %6, <4 x float>* %8, i32 4, <4 x i1> %1)
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+  %2 = fmul fast <4 x float> %wide.masked.load, %broadcast.splat13
+  %3 = getelementptr inbounds float, ptr %y, i32 %index
+  %wide.masked.load14 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %3, i32 4, <4 x i1> %1, <4 x float> undef)
+  %4 = fsub fast <4 x float> %2, %wide.masked.load14
+  %5 = getelementptr inbounds float, ptr %z, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %5, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n.vec
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
 declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)

diff  --git a/llvm/test/CodeGen/Thumb2/mve-fp16convertloops.ll b/llvm/test/CodeGen/Thumb2/mve-fp16convertloops.ll
index a02f6b8ebb82c..0f11e241eac30 100644
--- a/llvm/test/CodeGen/Thumb2/mve-fp16convertloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-fp16convertloops.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
 
-define void @to_4(float* nocapture readonly %x, half* noalias nocapture %y) {
+define void @to_4(ptr nocapture readonly %x, ptr noalias nocapture %y) {
 ; CHECK-LABEL: to_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -23,23 +23,21 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
-  %1 = bitcast float* %0 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %1, align 4
-  %2 = fmul <4 x float> %wide.load, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
-  %3 = fptrunc <4 x float> %2 to <4 x half>
-  %4 = getelementptr inbounds half, half* %y, i32 %index
-  %5 = bitcast half* %4 to <4 x half>*
-  store <4 x half> %3, <4 x half>* %5, align 2
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
+  %wide.load = load <4 x float>, ptr %0, align 4
+  %1 = fmul <4 x float> %wide.load, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %2 = fptrunc <4 x float> %1 to <4 x half>
+  %3 = getelementptr inbounds half, ptr %y, i32 %index
+  store <4 x half> %2, ptr %3, align 2
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, 1024
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @to_8(float* nocapture readonly %x, half* noalias nocapture %y) {
+define void @to_8(ptr nocapture readonly %x, ptr noalias nocapture %y) {
 ; CHECK-LABEL: to_8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -65,23 +63,21 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
-  %1 = bitcast float* %0 to <8 x float>*
-  %wide.load = load <8 x float>, <8 x float>* %1, align 4
-  %2 = fmul <8 x float> %wide.load, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
-  %3 = fptrunc <8 x float> %2 to <8 x half>
-  %4 = getelementptr inbounds half, half* %y, i32 %index
-  %5 = bitcast half* %4 to <8 x half>*
-  store <8 x half> %3, <8 x half>* %5, align 2
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
+  %wide.load = load <8 x float>, ptr %0, align 4
+  %1 = fmul <8 x float> %wide.load, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %2 = fptrunc <8 x float> %1 to <8 x half>
+  %3 = getelementptr inbounds half, ptr %y, i32 %index
+  store <8 x half> %2, ptr %3, align 2
   %index.next = add i32 %index, 8
-  %6 = icmp eq i32 %index.next, 1024
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @to_16(float* nocapture readonly %x, half* noalias nocapture %y) {
+define void @to_16(ptr nocapture readonly %x, ptr noalias nocapture %y) {
 ; CHECK-LABEL: to_16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -115,23 +111,21 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %x, i32 %index
-  %1 = bitcast float* %0 to <16 x float>*
-  %wide.load = load <16 x float>, <16 x float>* %1, align 4
-  %2 = fmul <16 x float> %wide.load, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
-  %3 = fptrunc <16 x float> %2 to <16 x half>
-  %4 = getelementptr inbounds half, half* %y, i32 %index
-  %5 = bitcast half* %4 to <16 x half>*
-  store <16 x half> %3, <16 x half>* %5, align 2
+  %0 = getelementptr inbounds float, ptr %x, i32 %index
+  %wide.load = load <16 x float>, ptr %0, align 4
+  %1 = fmul <16 x float> %wide.load, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %2 = fptrunc <16 x float> %1 to <16 x half>
+  %3 = getelementptr inbounds half, ptr %y, i32 %index
+  store <16 x half> %2, ptr %3, align 2
   %index.next = add i32 %index, 16
-  %6 = icmp eq i32 %index.next, 1024
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @from_4(half* nocapture readonly %x, float* noalias nocapture %y) {
+define void @from_4(ptr nocapture readonly %x, ptr noalias nocapture %y) {
 ; CHECK-LABEL: from_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -153,23 +147,21 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds half, half* %x, i32 %index
-  %1 = bitcast half* %0 to <4 x half>*
-  %wide.load = load <4 x half>, <4 x half>* %1, align 2
-  %2 = fpext <4 x half> %wide.load to <4 x float>
-  %3 = fmul <4 x float> %2, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
-  %4 = getelementptr inbounds float, float* %y, i32 %index
-  %5 = bitcast float* %4 to <4 x float>*
-  store <4 x float> %3, <4 x float>* %5, align 4
+  %0 = getelementptr inbounds half, ptr %x, i32 %index
+  %wide.load = load <4 x half>, ptr %0, align 2
+  %1 = fpext <4 x half> %wide.load to <4 x float>
+  %2 = fmul <4 x float> %1, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %3 = getelementptr inbounds float, ptr %y, i32 %index
+  store <4 x float> %2, ptr %3, align 4
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, 1024
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @from_8(half* nocapture readonly %x, float* noalias nocapture %y) {
+define void @from_8(ptr nocapture readonly %x, ptr noalias nocapture %y) {
 ; CHECK-LABEL: from_8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -195,23 +187,21 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds half, half* %x, i32 %index
-  %1 = bitcast half* %0 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %1, align 2
-  %2 = fpext <8 x half> %wide.load to <8 x float>
-  %3 = fmul <8 x float> %2, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
-  %4 = getelementptr inbounds float, float* %y, i32 %index
-  %5 = bitcast float* %4 to <8 x float>*
-  store <8 x float> %3, <8 x float>* %5, align 4
+  %0 = getelementptr inbounds half, ptr %x, i32 %index
+  %wide.load = load <8 x half>, ptr %0, align 2
+  %1 = fpext <8 x half> %wide.load to <8 x float>
+  %2 = fmul <8 x float> %1, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %3 = getelementptr inbounds float, ptr %y, i32 %index
+  store <8 x float> %2, ptr %3, align 4
   %index.next = add i32 %index, 8
-  %6 = icmp eq i32 %index.next, 1024
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @from_16(half* nocapture readonly %x, float* noalias nocapture %y) {
+define void @from_16(ptr nocapture readonly %x, ptr noalias nocapture %y) {
 ; CHECK-LABEL: from_16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -245,23 +235,21 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds half, half* %x, i32 %index
-  %1 = bitcast half* %0 to <16 x half>*
-  %wide.load = load <16 x half>, <16 x half>* %1, align 2
-  %2 = fpext <16 x half> %wide.load to <16 x float>
-  %3 = fmul <16 x float> %2, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
-  %4 = getelementptr inbounds float, float* %y, i32 %index
-  %5 = bitcast float* %4 to <16 x float>*
-  store <16 x float> %3, <16 x float>* %5, align 4
+  %0 = getelementptr inbounds half, ptr %x, i32 %index
+  %wide.load = load <16 x half>, ptr %0, align 2
+  %1 = fpext <16 x half> %wide.load to <16 x float>
+  %2 = fmul <16 x float> %1, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %3 = getelementptr inbounds float, ptr %y, i32 %index
+  store <16 x float> %2, ptr %3, align 4
   %index.next = add i32 %index, 16
-  %6 = icmp eq i32 %index.next, 1024
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @both_4(half* nocapture readonly %x, half* noalias nocapture %y) {
+define void @both_4(ptr nocapture readonly %x, ptr noalias nocapture %y) {
 ; CHECK-LABEL: both_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -284,24 +272,22 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds half, half* %x, i32 %index
-  %1 = bitcast half* %0 to <4 x half>*
-  %wide.load = load <4 x half>, <4 x half>* %1, align 2
-  %2 = fpext <4 x half> %wide.load to <4 x float>
-  %3 = fmul <4 x float> %2, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
-  %4 = fptrunc <4 x float> %3 to <4 x half>
-  %5 = getelementptr inbounds half, half* %y, i32 %index
-  %6 = bitcast half* %5 to <4 x half>*
-  store <4 x half> %4, <4 x half>* %6, align 2
+  %0 = getelementptr inbounds half, ptr %x, i32 %index
+  %wide.load = load <4 x half>, ptr %0, align 2
+  %1 = fpext <4 x half> %wide.load to <4 x float>
+  %2 = fmul <4 x float> %1, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %3 = fptrunc <4 x float> %2 to <4 x half>
+  %4 = getelementptr inbounds half, ptr %y, i32 %index
+  store <4 x half> %3, ptr %4, align 2
   %index.next = add i32 %index, 4
-  %7 = icmp eq i32 %index.next, 1024
-  br i1 %7, label %for.cond.cleanup, label %vector.body
+  %5 = icmp eq i32 %index.next, 1024
+  br i1 %5, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @both_8(half* nocapture readonly %x, half* noalias nocapture %y) {
+define void @both_8(ptr nocapture readonly %x, ptr noalias nocapture %y) {
 ; CHECK-LABEL: both_8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -327,24 +313,22 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds half, half* %x, i32 %index
-  %1 = bitcast half* %0 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %1, align 2
-  %2 = fpext <8 x half> %wide.load to <8 x float>
-  %3 = fmul <8 x float> %2, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
-  %4 = fptrunc <8 x float> %3 to <8 x half>
-  %5 = getelementptr inbounds half, half* %y, i32 %index
-  %6 = bitcast half* %5 to <8 x half>*
-  store <8 x half> %4, <8 x half>* %6, align 2
+  %0 = getelementptr inbounds half, ptr %x, i32 %index
+  %wide.load = load <8 x half>, ptr %0, align 2
+  %1 = fpext <8 x half> %wide.load to <8 x float>
+  %2 = fmul <8 x float> %1, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %3 = fptrunc <8 x float> %2 to <8 x half>
+  %4 = getelementptr inbounds half, ptr %y, i32 %index
+  store <8 x half> %3, ptr %4, align 2
   %index.next = add i32 %index, 8
-  %7 = icmp eq i32 %index.next, 1024
-  br i1 %7, label %for.cond.cleanup, label %vector.body
+  %5 = icmp eq i32 %index.next, 1024
+  br i1 %5, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @both_16(half* nocapture readonly %x, half* noalias nocapture %y) {
+define void @both_16(ptr nocapture readonly %x, ptr noalias nocapture %y) {
 ; CHECK-LABEL: both_16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -378,24 +362,22 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds half, half* %x, i32 %index
-  %1 = bitcast half* %0 to <16 x half>*
-  %wide.load = load <16 x half>, <16 x half>* %1, align 2
-  %2 = fpext <16 x half> %wide.load to <16 x float>
-  %3 = fmul <16 x float> %2, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
-  %4 = fptrunc <16 x float> %3 to <16 x half>
-  %5 = getelementptr inbounds half, half* %y, i32 %index
-  %6 = bitcast half* %5 to <16 x half>*
-  store <16 x half> %4, <16 x half>* %6, align 2
+  %0 = getelementptr inbounds half, ptr %x, i32 %index
+  %wide.load = load <16 x half>, ptr %0, align 2
+  %1 = fpext <16 x half> %wide.load to <16 x float>
+  %2 = fmul <16 x float> %1, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %3 = fptrunc <16 x float> %2 to <16 x half>
+  %4 = getelementptr inbounds half, ptr %y, i32 %index
+  store <16 x half> %3, ptr %4, align 2
   %index.next = add i32 %index, 16
-  %7 = icmp eq i32 %index.next, 1024
-  br i1 %7, label %for.cond.cleanup, label %vector.body
+  %5 = icmp eq i32 %index.next, 1024
+  br i1 %5, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @both_8_I(half* nocapture readonly %x, half* noalias nocapture %y) {
+define void @both_8_I(ptr nocapture readonly %x, ptr noalias nocapture %y) {
 ; CHECK-LABEL: both_8_I:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -421,29 +403,27 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds half, half* %x, i32 %index
-  %1 = bitcast half* %0 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %1, align 2
-  %2 = shufflevector <8 x half> %wide.load, <8 x half> %wide.load, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %3 = shufflevector <8 x half> %wide.load, <8 x half> %wide.load, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %0 = getelementptr inbounds half, ptr %x, i32 %index
+  %wide.load = load <8 x half>, ptr %0, align 2
+  %1 = shufflevector <8 x half> %wide.load, <8 x half> %wide.load, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %2 = shufflevector <8 x half> %wide.load, <8 x half> %wide.load, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %3 = fpext <4 x half> %1 to <4 x float>
   %4 = fpext <4 x half> %2 to <4 x float>
-  %5 = fpext <4 x half> %3 to <4 x float>
+  %5 = fmul <4 x float> %3, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
   %6 = fmul <4 x float> %4, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
-  %7 = fmul <4 x float> %5, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
-  %8 = shufflevector <4 x float> %6, <4 x float> %7, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  %9 = fptrunc <8 x float> %8 to <8 x half>
-  %10 = getelementptr inbounds half, half* %y, i32 %index
-  %11 = bitcast half* %10 to <8 x half>*
-  store <8 x half> %9, <8 x half>* %11, align 2
+  %7 = shufflevector <4 x float> %5, <4 x float> %6, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  %8 = fptrunc <8 x float> %7 to <8 x half>
+  %9 = getelementptr inbounds half, ptr %y, i32 %index
+  store <8 x half> %8, ptr %9, align 2
   %index.next = add i32 %index, 8
-  %12 = icmp eq i32 %index.next, 1024
-  br i1 %12, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @both_16_I(half* nocapture readonly %x, half* noalias nocapture %y) {
+define void @both_16_I(ptr nocapture readonly %x, ptr noalias nocapture %y) {
 ; CHECK-LABEL: both_16_I:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -477,23 +457,21 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds half, half* %x, i32 %index
-  %1 = bitcast half* %0 to <16 x half>*
-  %wide.load = load <16 x half>, <16 x half>* %1, align 2
-  %2 = shufflevector <16 x half> %wide.load, <16 x half> %wide.load, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %3 = shufflevector <16 x half> %wide.load, <16 x half> %wide.load, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %0 = getelementptr inbounds half, ptr %x, i32 %index
+  %wide.load = load <16 x half>, ptr %0, align 2
+  %1 = shufflevector <16 x half> %wide.load, <16 x half> %wide.load, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %2 = shufflevector <16 x half> %wide.load, <16 x half> %wide.load, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %3 = fpext <8 x half> %1 to <8 x float>
   %4 = fpext <8 x half> %2 to <8 x float>
-  %5 = fpext <8 x half> %3 to <8 x float>
+  %5 = fmul <8 x float> %3, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
   %6 = fmul <8 x float> %4, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
-  %7 = fmul <8 x float> %5, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
-  %8 = shufflevector <8 x float> %6, <8 x float> %7, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-  %9 = fptrunc <16 x float> %8 to <16 x half>
-  %10 = getelementptr inbounds half, half* %y, i32 %index
-  %11 = bitcast half* %10 to <16 x half>*
-  store <16 x half> %9, <16 x half>* %11, align 2
+  %7 = shufflevector <8 x float> %5, <8 x float> %6, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  %8 = fptrunc <16 x float> %7 to <16 x half>
+  %9 = getelementptr inbounds half, ptr %y, i32 %index
+  store <16 x half> %8, ptr %9, align 2
   %index.next = add i32 %index, 8
-  %12 = icmp eq i32 %index.next, 1024
-  br i1 %12, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void

diff  --git a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
index 43e90e885010c..24f1831a3f07c 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst %s -o - -opaque-pointers | FileCheck %s
 
-define arm_aapcs_vfpcc <4 x i32> @gather_inc_mini_4i32(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, <4 x i32> %offs) {
+define arm_aapcs_vfpcc <4 x i32> @gather_inc_mini_4i32(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, <4 x i32> %offs) {
 ; CHECK-LABEL: gather_inc_mini_4i32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    movs r1, #4
@@ -9,12 +9,12 @@ define arm_aapcs_vfpcc <4 x i32> @gather_inc_mini_4i32(i32* noalias nocapture re
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
   %1 = add <4 x i32> %offs, <i32 4, i32 4, i32 4, i32 4>
-  %2 = getelementptr inbounds i32, i32* %data, <4 x i32> %1
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %2 = getelementptr inbounds i32, ptr %data, <4 x i32> %1
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %wide.masked.gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @gather_inc_minipred_4i32(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, <4 x i32> %offs) {
+define arm_aapcs_vfpcc <4 x i32> @gather_inc_minipred_4i32(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, <4 x i32> %offs) {
 ; CHECK-LABEL: gather_inc_minipred_4i32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    movs r1, #4
@@ -25,12 +25,12 @@ define arm_aapcs_vfpcc <4 x i32> @gather_inc_minipred_4i32(i32* noalias nocaptur
 ; CHECK-NEXT:    vldrwt.u32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
   %1 = add <4 x i32> %offs, <i32 4, i32 4, i32 4, i32 4>
-  %2 = getelementptr inbounds i32, i32* %data, <4 x i32> %1
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %2, i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> undef)
+  %2 = getelementptr inbounds i32, ptr %data, <4 x i32> %1
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %2, i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> undef)
   ret <4 x i32> %wide.masked.gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @gather_inc_mini_8i16(i16* noalias nocapture readonly %data, i16* noalias nocapture %dst, <8 x i32> %offs) {
+define arm_aapcs_vfpcc <8 x i16> @gather_inc_mini_8i16(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, <8 x i32> %offs) {
 ; CHECK-LABEL: gather_inc_mini_8i16:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -64,12 +64,12 @@ define arm_aapcs_vfpcc <8 x i16> @gather_inc_mini_8i16(i16* noalias nocapture re
 ; CHECK-NEXT:    vmov.16 q0[7], r12
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
   %1 = add <8 x i32> %offs, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
-  %2 = getelementptr inbounds i16, i16* %data, <8 x i32> %1
-  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %2, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %2 = getelementptr inbounds i16, ptr %data, <8 x i32> %1
+  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %2, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %wide.masked.gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @gather_inc_minipred_8i16(i16* noalias nocapture readonly %data, i16* noalias nocapture %dst, <8 x i32> %offs) {
+define arm_aapcs_vfpcc <8 x i16> @gather_inc_minipred_8i16(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, <8 x i32> %offs) {
 ; CHECK-LABEL: gather_inc_minipred_8i16:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vshl.i32 q0, q0, #1
@@ -93,12 +93,12 @@ define arm_aapcs_vfpcc <8 x i16> @gather_inc_minipred_8i16(i16* noalias nocaptur
 ; CHECK-NEXT:    vmov.16 q0[6], r1
 ; CHECK-NEXT:    bx lr
   %1 = add <8 x i32> %offs, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
-  %2 = getelementptr inbounds i16, i16* %data, <8 x i32> %1
-  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %2, i32 4, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <8 x i16> undef)
+  %2 = getelementptr inbounds i16, ptr %data, <8 x i32> %1
+  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %2, i32 4, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <8 x i16> undef)
   ret <8 x i16> %wide.masked.gather
 }
 
-define arm_aapcs_vfpcc <16 x i8> @gather_inc_mini_16i8(i8* noalias nocapture readonly %data, i8* noalias nocapture %dst, <16 x i32> %offs) {
+define arm_aapcs_vfpcc <16 x i8> @gather_inc_mini_16i8(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, <16 x i32> %offs) {
 ; CHECK-LABEL: gather_inc_mini_16i8:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -154,12 +154,12 @@ define arm_aapcs_vfpcc <16 x i8> @gather_inc_mini_16i8(i8* noalias nocapture rea
 ; CHECK-NEXT:    vmov.8 q0[15], lr
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
   %1 = add <16 x i32> %offs, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-  %2 = getelementptr inbounds i8, i8* %data, <16 x i32> %1
-  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %2, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %2 = getelementptr inbounds i8, ptr %data, <16 x i32> %1
+  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %2, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   ret <16 x i8> %wide.masked.gather
 }
 
-define arm_aapcs_vfpcc <16 x i8> @gather_inc_minipred_16i8(i8* noalias nocapture readonly %data, i8* noalias nocapture %dst, <16 x i32> %offs) {
+define arm_aapcs_vfpcc <16 x i8> @gather_inc_minipred_16i8(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, <16 x i32> %offs) {
 ; CHECK-LABEL: gather_inc_minipred_16i8:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -199,12 +199,12 @@ define arm_aapcs_vfpcc <16 x i8> @gather_inc_minipred_16i8(i8* noalias nocapture
 ; CHECK-NEXT:    vmov.8 q0[14], r1
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
   %1 = add <16 x i32> %offs, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-  %2 = getelementptr inbounds i8, i8* %data, <16 x i32> %1
-  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %2, i32 2, <16 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <16 x i8> undef)
+  %2 = getelementptr inbounds i8, ptr %data, <16 x i32> %1
+  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %2, i32 2, <16 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <16 x i8> undef)
   ret <16 x i8> %wide.masked.gather
 }
 
-define arm_aapcs_vfpcc void @gather_pre_inc(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec) {
+define arm_aapcs_vfpcc void @gather_pre_inc(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n.vec) {
 ; CHECK-LABEL: gather_pre_inc:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    adr r3, .LCPI6_0
@@ -233,21 +233,20 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %vec.ind = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, %vector.ph ], [ %vec.ind.next, %vector.body ]
   %0 = mul <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
   %1 = add <4 x i32> %0, <i32 6, i32 6, i32 6, i32 6>
-  %2 = getelementptr inbounds i32, i32* %data, <4 x i32> %1
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
-  %3 = getelementptr inbounds i32, i32* %dst, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  store <4 x i32> %wide.masked.gather, <4 x i32>* %4, align 4
+  %2 = getelementptr inbounds i32, ptr %data, <4 x i32> %1
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %3 = getelementptr inbounds i32, ptr %dst, i32 %index
+  store <4 x i32> %wide.masked.gather, ptr %3, align 4
   %index.next = add i32 %index, 4
   %vec.ind.next = add <4 x i32> %vec.ind, <i32 8, i32 8, i32 8, i32 8>
-  %5 = icmp eq i32 %index.next, %n.vec
-  br i1 %5, label %end, label %vector.body
+  %4 = icmp eq i32 %index.next, %n.vec
+  br i1 %4, label %end, label %vector.body
 
 end:
   ret void;
 }
 
-define arm_aapcs_vfpcc void @gather_post_inc(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec43) {
+define arm_aapcs_vfpcc void @gather_post_inc(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n.vec43) {
 ; CHECK-LABEL: gather_post_inc:
 ; CHECK:       @ %bb.0: @ %vector.ph41
 ; CHECK-NEXT:    adr r3, .LCPI7_0
@@ -275,21 +274,20 @@ vector.body39:                                    ; preds = %vector.body39, %vec
   %index44 = phi i32 [ 0, %vector.ph41 ], [ %index.next45, %vector.body39 ]
   %vec.ind50 = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, %vector.ph41 ], [ %vec.ind.next51, %vector.body39 ]
   %0 = mul nuw nsw <4 x i32> %vec.ind50, <i32 3, i32 3, i32 3, i32 3>
-  %1 = getelementptr inbounds i32, i32* %data, <4 x i32> %0
-  %wide.masked.gather55 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
-  %2 = getelementptr inbounds i32, i32* %dst, i32 %index44
-  %3 = bitcast i32* %2 to <4 x i32>*
-  store <4 x i32> %wide.masked.gather55, <4 x i32>* %3, align 4
+  %1 = getelementptr inbounds i32, ptr %data, <4 x i32> %0
+  %wide.masked.gather55 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %2 = getelementptr inbounds i32, ptr %dst, i32 %index44
+  store <4 x i32> %wide.masked.gather55, ptr %2, align 4
   %index.next45 = add i32 %index44, 4
   %vec.ind.next51 = add <4 x i32> %vec.ind50, <i32 8, i32 8, i32 8, i32 8>
-  %4 = icmp eq i32 %index.next45, %n.vec43
-  br i1 %4, label %end, label %vector.body39
+  %3 = icmp eq i32 %index.next45, %n.vec43
+  br i1 %3, label %end, label %vector.body39
 
 end:
   ret void;
 }
 
-define arm_aapcs_vfpcc void @gather_inc_v4i32_simple(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n) {
+define arm_aapcs_vfpcc void @gather_inc_v4i32_simple(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) {
 ; CHECK-LABEL: gather_inc_v4i32_simple:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -341,15 +339,14 @@ vector.ph:                                        ; preds = %for.body.preheader
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %data, <4 x i32> %vec.ind
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %0, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
-  %1 = getelementptr inbounds i32, i32* %dst, i32 %index
-  %2 = bitcast i32* %1 to <4 x i32>*
-  store <4 x i32> %wide.masked.gather, <4 x i32>* %2, align 4
+  %0 = getelementptr inbounds i32, ptr %data, <4 x i32> %vec.ind
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %0, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %1 = getelementptr inbounds i32, ptr %dst, i32 %index
+  store <4 x i32> %wide.masked.gather, ptr %1, align 4
   %index.next = add i32 %index, 4
   %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
-  %3 = icmp eq i32 %index.next, %n.vec
-  br i1 %3, label %middle.block, label %vector.body
+  %2 = icmp eq i32 %index.next, %n.vec
+  br i1 %2, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %n
@@ -359,7 +356,7 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
   ret void
 }
 
-define arm_aapcs_vfpcc void @gather_inc_v4i32_complex(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n) {
+define arm_aapcs_vfpcc void @gather_inc_v4i32_complex(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) {
 ; CHECK-LABEL: gather_inc_v4i32_complex:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -436,23 +433,22 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
   %0 = mul nuw nsw <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
-  %1 = getelementptr inbounds i32, i32* %data, <4 x i32> %0
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %1 = getelementptr inbounds i32, ptr %data, <4 x i32> %0
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   %2 = add nuw nsw <4 x i32> %0, <i32 1, i32 1, i32 1, i32 1>
-  %3 = getelementptr inbounds i32, i32* %data, <4 x i32> %2
-  %wide.masked.gather24 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %3, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %3 = getelementptr inbounds i32, ptr %data, <4 x i32> %2
+  %wide.masked.gather24 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %3, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   %4 = add nuw nsw <4 x i32> %0, <i32 2, i32 2, i32 2, i32 2>
-  %5 = getelementptr inbounds i32, i32* %data, <4 x i32> %4
-  %wide.masked.gather25 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %5, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %5 = getelementptr inbounds i32, ptr %data, <4 x i32> %4
+  %wide.masked.gather25 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %5, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   %6 = add nsw <4 x i32> %wide.masked.gather24, %wide.masked.gather
   %7 = add nsw <4 x i32> %6, %wide.masked.gather25
-  %8 = getelementptr inbounds i32, i32* %dst, i32 %index
-  %9 = bitcast i32* %8 to <4 x i32>*
-  store <4 x i32> %7, <4 x i32>* %9, align 4
+  %8 = getelementptr inbounds i32, ptr %dst, i32 %index
+  store <4 x i32> %7, ptr %8, align 4
   %index.next = add i32 %index, 4
   %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
-  %10 = icmp eq i32 %index.next, %n.vec
-  br i1 %10, label %middle.block, label %vector.body
+  %9 = icmp eq i32 %index.next, %n.vec
+  br i1 %9, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %n
@@ -462,7 +458,7 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
   ret void
 }
 
-define arm_aapcs_vfpcc void @gather_inc_v4i32_large(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n) {
+define arm_aapcs_vfpcc void @gather_inc_v4i32_large(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) {
 ; CHECK-LABEL: gather_inc_v4i32_large:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -514,15 +510,14 @@ vector.ph:                                        ; preds = %for.body.preheader
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %data, <4 x i32> %vec.ind
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %0, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
-  %1 = getelementptr inbounds i32, i32* %dst, i32 %index
-  %2 = bitcast i32* %1 to <4 x i32>*
-  store <4 x i32> %wide.masked.gather, <4 x i32>* %2, align 4
+  %0 = getelementptr inbounds i32, ptr %data, <4 x i32> %vec.ind
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %0, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %1 = getelementptr inbounds i32, ptr %dst, i32 %index
+  store <4 x i32> %wide.masked.gather, ptr %1, align 4
   %index.next = add i32 %index, 4
   %vec.ind.next = add <4 x i32> %vec.ind, <i32 127, i32 127, i32 127, i32 127>
-  %3 = icmp eq i32 %index.next, %n.vec
-  br i1 %3, label %middle.block, label %vector.body
+  %2 = icmp eq i32 %index.next, %n.vec
+  br i1 %2, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %n
@@ -535,7 +530,7 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 ; TODO: uneven - it is probably not possible to create such an example, because vec.ind is always advanced by a vector of 4 elements, so the scalar index stays a multiple of 4 (=> x*4 = even)
 
 ; TODO: What is sxth?
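 ; (sxth is the Arm/Thumb2 "signed extend halfword" instruction, which sign-extends the low 16 bits of a register; presumably it appears in the codegen below when the i16 offsets are widened to i32.)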
-define arm_aapcs_vfpcc void @gather_inc_v8i16_simple(i16* noalias nocapture readonly %data, i16* noalias nocapture %dst, i32 %n) {
+define arm_aapcs_vfpcc void @gather_inc_v8i16_simple(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) {
 ; CHECK-LABEL: gather_inc_v8i16_simple:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -632,15 +627,14 @@ vector.ph:                                        ; preds = %for.body.preheader
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.ind = phi <8 x i16> [ <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, %vector.ph ], [ %vec.ind.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %data, <8 x i16> %vec.ind
-  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %0, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
-  %1 = getelementptr inbounds i16, i16* %dst, i32 %index
-  %2 = bitcast i16* %1 to <8 x i16>*
-  store <8 x i16> %wide.masked.gather, <8 x i16>* %2, align 2
+  %0 = getelementptr inbounds i16, ptr %data, <8 x i16> %vec.ind
+  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %0, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %1 = getelementptr inbounds i16, ptr %dst, i32 %index
+  store <8 x i16> %wide.masked.gather, ptr %1, align 2
   %index.next = add i32 %index, 8
   %vec.ind.next = add <8 x i16> %vec.ind, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-  %3 = icmp eq i32 %index.next, %n.vec
-  br i1 %3, label %middle.block, label %vector.body
+  %2 = icmp eq i32 %index.next, %n.vec
+  br i1 %2, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %n
@@ -651,7 +645,7 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 }
 
 ; TODO: This looks absolutely terrifying :(
-define arm_aapcs_vfpcc void @gather_inc_v8i16_complex(i16* noalias nocapture readonly %data, i16* noalias nocapture %dst, i32 %n) {
+define arm_aapcs_vfpcc void @gather_inc_v8i16_complex(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) {
 ; CHECK-LABEL: gather_inc_v8i16_complex:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -847,23 +841,22 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.ind = phi <8 x i16> [ <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, %vector.ph ], [ %vec.ind.next, %vector.body ]
   %0 = mul nuw nsw <8 x i16> %vec.ind, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
-  %1 = getelementptr inbounds i16, i16* %data, <8 x i16> %0
-  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %1, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %1 = getelementptr inbounds i16, ptr %data, <8 x i16> %0
+  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %1, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   %2 = add nuw nsw <8 x i16> %0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %3 = getelementptr inbounds i16, i16* %data, <8 x i16> %2
-  %wide.masked.gather24 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %3, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %3 = getelementptr inbounds i16, ptr %data, <8 x i16> %2
+  %wide.masked.gather24 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %3, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   %4 = add nuw nsw <8 x i16> %0, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
-  %5 = getelementptr inbounds i16, i16* %data, <8 x i16> %4
-  %wide.masked.gather25 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %5, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %5 = getelementptr inbounds i16, ptr %data, <8 x i16> %4
+  %wide.masked.gather25 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %5, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   %6 = add nsw <8 x i16> %wide.masked.gather24, %wide.masked.gather
   %7 = add nsw <8 x i16> %6, %wide.masked.gather25
-  %8 = getelementptr inbounds i16, i16* %dst, i32 %index
-  %9 = bitcast i16* %8 to <8 x i16>*
-  store <8 x i16> %7, <8 x i16>* %9, align 2
+  %8 = getelementptr inbounds i16, ptr %dst, i32 %index
+  store <8 x i16> %7, ptr %8, align 2
   %index.next = add i32 %index, 8
   %vec.ind.next = add <8 x i16> %vec.ind, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-  %10 = icmp eq i32 %index.next, %n.vec
-  br i1 %10, label %middle.block, label %vector.body
+  %9 = icmp eq i32 %index.next, %n.vec
+  br i1 %9, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %n
@@ -874,7 +867,7 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 }
 
 
-define arm_aapcs_vfpcc void @gather_inc_v16i8_complex(i8* noalias nocapture readonly %data, i8* noalias nocapture %dst, i32 %n) {
+define arm_aapcs_vfpcc void @gather_inc_v16i8_complex(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) {
 ; CHECK-LABEL: gather_inc_v16i8_complex:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -1209,23 +1202,22 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.ind = phi <16 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, %vector.ph ], [ %vec.ind.next, %vector.body ]
   %0 = mul nuw nsw <16 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
-  %1 = getelementptr inbounds i8, i8* %data, <16 x i32> %0
-  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %1, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %1 = getelementptr inbounds i8, ptr %data, <16 x i32> %0
+  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %1, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   %2 = add nuw nsw <16 x i32> %0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %3 = getelementptr inbounds i8, i8* %data, <16 x i32> %2
-  %wide.masked.gather24 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %3, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %3 = getelementptr inbounds i8, ptr %data, <16 x i32> %2
+  %wide.masked.gather24 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %3, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   %4 = add nuw nsw <16 x i32> %0, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
-  %5 = getelementptr inbounds i8, i8* %data, <16 x i32> %4
-  %wide.masked.gather25 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %5, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %5 = getelementptr inbounds i8, ptr %data, <16 x i32> %4
+  %wide.masked.gather25 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %5, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   %6 = add nsw <16 x i8> %wide.masked.gather24, %wide.masked.gather
   %7 = add nsw <16 x i8> %6, %wide.masked.gather25
-  %8 = getelementptr inbounds i8, i8* %dst, i32 %index
-  %9 = bitcast i8* %8 to <16 x i8>*
-  store <16 x i8> %7, <16 x i8>* %9, align 2
+  %8 = getelementptr inbounds i8, ptr %dst, i32 %index
+  store <16 x i8> %7, ptr %8, align 2
   %index.next = add i32 %index, 16
   %vec.ind.next = add <16 x i32> %vec.ind, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-  %10 = icmp eq i32 %index.next, %n.vec
-  br i1 %10, label %middle.block, label %vector.body
+  %9 = icmp eq i32 %index.next, %n.vec
+  br i1 %9, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %n
@@ -1235,7 +1227,7 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
   ret void
 }
 
-define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(i8* noalias nocapture readonly %data, i8* noalias nocapture %dst, i32 %n) {
+define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) {
 ; CHECK-LABEL: gather_inc_v16i8_simple:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -1377,15 +1369,14 @@ vector.ph:                                        ; preds = %for.body.preheader
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.ind = phi <16 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, %vector.ph ], [ %vec.ind.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %data, <16 x i32> %vec.ind
-  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %0, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
-  %1 = getelementptr inbounds i8, i8* %dst, i32 %index
-  %2 = bitcast i8* %1 to <16 x i8>*
-  store <16 x i8> %wide.masked.gather, <16 x i8>* %2, align 2
+  %0 = getelementptr inbounds i8, ptr %data, <16 x i32> %vec.ind
+  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %0, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %1 = getelementptr inbounds i8, ptr %dst, i32 %index
+  store <16 x i8> %wide.masked.gather, ptr %1, align 2
   %index.next = add i32 %index, 16
   %vec.ind.next = add <16 x i32> %vec.ind, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-  %3 = icmp eq i32 %index.next, %n.vec
-  br i1 %3, label %middle.block, label %vector.body
+  %2 = icmp eq i32 %index.next, %n.vec
+  br i1 %2, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %n
@@ -1395,7 +1386,7 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
   ret void
 }
 
-define void @shl(i32* nocapture %x, i32* noalias nocapture readonly %y, i32 %n) {
+define void @shl(ptr nocapture %x, ptr noalias nocapture readonly %y, i32 %n) {
 ; CHECK-LABEL: shl:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1436,21 +1427,20 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
   %0 = shl nsw <4 x i32> %vec.ind, <i32 2, i32 2, i32 2, i32 2>
-  %1 = getelementptr inbounds i32, i32* %y, <4 x i32> %0
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
-  %2 = getelementptr inbounds i32, i32* %x, i32 %index
-  %3 = bitcast i32* %2 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %wide.masked.gather, <4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask)
+  %1 = getelementptr inbounds i32, ptr %y, <4 x i32> %0
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %2 = getelementptr inbounds i32, ptr %x, i32 %index
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %wide.masked.gather, ptr %2, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
   %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
-  %4 = icmp eq i32 %index.next, %n.vec
-  br i1 %4, label %for.cond.cleanup, label %vector.body
+  %3 = icmp eq i32 %index.next, %n.vec
+  br i1 %3, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define void @shlor(i32* nocapture %x, i32* noalias nocapture readonly %y, i32 %n) {
+define void @shlor(ptr nocapture %x, ptr noalias nocapture readonly %y, i32 %n) {
 ; CHECK-LABEL: shlor:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -1523,50 +1513,49 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
   %0 = shl nsw <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
-  %1 = getelementptr inbounds i32, i32* %y, <4 x i32> %0
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %1 = getelementptr inbounds i32, ptr %y, <4 x i32> %0
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %2 = or <4 x i32> %0, <i32 2, i32 2, i32 2, i32 2>
-  %3 = getelementptr inbounds i32, i32* %y, <4 x i32> %2
-  %wide.masked.gather25 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %3 = getelementptr inbounds i32, ptr %y, <4 x i32> %2
+  %wide.masked.gather25 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %4 = add nsw <4 x i32> %wide.masked.gather25, %wide.masked.gather
   %5 = or <4 x i32> %0, <i32 4, i32 4, i32 4, i32 4>
-  %6 = getelementptr inbounds i32, i32* %y, <4 x i32> %5
-  %wide.masked.gather26 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %6, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %6 = getelementptr inbounds i32, ptr %y, <4 x i32> %5
+  %wide.masked.gather26 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %6, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %7 = add nsw <4 x i32> %4, %wide.masked.gather26
   %8 = or <4 x i32> %0, <i32 6, i32 6, i32 6, i32 6>
-  %9 = getelementptr inbounds i32, i32* %y, <4 x i32> %8
-  %wide.masked.gather27 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %9, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %9 = getelementptr inbounds i32, ptr %y, <4 x i32> %8
+  %wide.masked.gather27 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %9, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %10 = add nsw <4 x i32> %7, %wide.masked.gather27
-  %11 = getelementptr inbounds i32, i32* %x, i32 %index
-  %12 = bitcast i32* %11 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %10, <4 x i32>* %12, i32 4, <4 x i1> %active.lane.mask)
+  %11 = getelementptr inbounds i32, ptr %x, i32 %index
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %10, ptr %11, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
   %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
-  %13 = icmp eq i32 %index.next, %n.vec
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %12 = icmp eq i32 %index.next, %n.vec
+  br i1 %12, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
 
-declare <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*>, i32, <2 x i1>, <2 x i32>)
-declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
-declare <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*>, i32, <8 x i1>, <8 x i32>)
-declare <16 x i32> @llvm.masked.gather.v16i32.v16p0i32(<16 x i32*>, i32, <16 x i1>, <16 x i32>)
-declare <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*>, i32, <2 x i1>, <2 x float>)
-declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*>, i32, <4 x i1>, <4 x float>)
-declare <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*>, i32, <8 x i1>, <8 x float>)
-declare <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*>, i32, <2 x i1>, <2 x i16>)
-declare <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>)
-declare <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*>, i32, <8 x i1>, <8 x i16>)
-declare <16 x i16> @llvm.masked.gather.v16i16.v16p0i16(<16 x i16*>, i32, <16 x i1>, <16 x i16>)
-declare <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*>, i32, <4 x i1>, <4 x half>)
-declare <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*>, i32, <8 x i1>, <8 x half>)
-declare <16 x half> @llvm.masked.gather.v16f16.v16p0f16(<16 x half*>, i32, <16 x i1>, <16 x half>)
-declare <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>)
-declare <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*>, i32, <8 x i1>, <8 x i8>)
-declare <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>)
-declare <32 x i8> @llvm.masked.gather.v32i8.v32p0i8(<32 x i8*>, i32, <32 x i1>, <32 x i8>)
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i32>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)
+declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i32>)
+declare <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i32>)
+declare <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x float>)
+declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>)
+declare <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x float>)
+declare <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i16>)
+declare <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i16>)
+declare <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i16>)
+declare <16 x i16> @llvm.masked.gather.v16i16.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i16>)
+declare <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x half>)
+declare <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x half>)
+declare <16 x half> @llvm.masked.gather.v16f16.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x half>)
+declare <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i8>)
+declare <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i8>)
+declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i8>)
+declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32, <32 x i1>, <32 x i8>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)

diff  --git a/llvm/test/CodeGen/Thumb2/mve-gather-ind16-scaled.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ind16-scaled.ll
index d6dddf756895e..0e457f2b61262 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-ind16-scaled.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ind16-scaled.ll
@@ -1,50 +1,50 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - -opaque-pointers | FileCheck %s
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16(i16* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x half> @scaled_v8f16_i16(i16* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x half> @scaled_v8f16_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8f16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %i16_ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i16*> %i16_ptrs to <8 x half*>
-  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
+  %i16_ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %i16_ptrs to <8 x ptr>
+  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
   ret <8 x half> %gather
 }
 
-define arm_aapcs_vfpcc <8 x half> @scaled_v8f16_half(half* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x half> @scaled_v8f16_half(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8f16_half:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds half, half* %base, <8 x i32> %offs.zext
-  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
+  %ptrs = getelementptr inbounds half, ptr %base, <8 x i32> %offs.zext
+  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
   ret <8 x half> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_sext(i16* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_sext(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_sext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -77,14 +77,14 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_sext(i16* %base, <8 x i16>* %offp
 ; CHECK-NEXT:    vmov.16 q0[7], lr
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.sext = sext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.sext
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.sext
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x half> @scaled_v8f16_sext(i16* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x half> @scaled_v8f16_sext(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8f16_sext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r1]
@@ -111,72 +111,72 @@ define arm_aapcs_vfpcc <8 x half> @scaled_v8f16_sext(i16* %base, <8 x i16>* %off
 ; CHECK-NEXT:    vins.f16 s3, s4
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.sext = sext <8 x i16> %offs to <8 x i32>
-  %i16_ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.sext
-  %ptrs = bitcast <8 x i16*> %i16_ptrs to <8 x half*>
-  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
+  %i16_ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.sext
+  %ptrs = bitcast <8 x ptr> %i16_ptrs to <8 x ptr>
+  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
   ret <8 x half> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @unsigned_scaled_v8i16_i8(i16* %base, <8 x i8>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @unsigned_scaled_v8i16_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unsigned_scaled_v8i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r1]
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x half> @unsigned_scaled_v8f16_i8(i16* %base, <8 x i8>* %offptr) {
+define arm_aapcs_vfpcc <8 x half> @unsigned_scaled_v8f16_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unsigned_scaled_v8f16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r1]
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %i16_ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i16*> %i16_ptrs to <8 x half*>
-  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
+  %i16_ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %i16_ptrs to <8 x ptr>
+  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
   ret <8 x half> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru0t(i16* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru0t(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_i16_passthru0t:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> zeroinitializer)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> zeroinitializer)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru1t(i16* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru1t(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_i16_passthru1t:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru1f(i16* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru1f(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_i16_passthru1f:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    movw r2, #65487
@@ -188,14 +188,14 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru1f(i16* %base, <8 x i
 ; CHECK-NEXT:    vpsel q0, q2, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru0f(i16* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru0f(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_i16_passthru0f:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    movw r2, #65523
@@ -205,14 +205,14 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru0f(i16* %base, <8 x i
 ; CHECK-NEXT:    vldrht.u16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru_icmp0(i16* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru_icmp0(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_i16_passthru_icmp0:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
@@ -220,15 +220,15 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru_icmp0(i16* %base, <8
 ; CHECK-NEXT:    vldrht.u16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
   %mask = icmp sgt <8 x i16> %offs, zeroinitializer
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> %mask, <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>)
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %mask, <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru_icmp1(i16* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru_icmp1(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_i16_passthru_icmp1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
@@ -238,15 +238,15 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_passthru_icmp1(i16* %base, <8
 ; CHECK-NEXT:    vpsel q0, q2, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
   %mask = icmp sgt <8 x i16> %offs, zeroinitializer
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> %mask, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %mask, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_2gep(i16* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_2gep(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_i16_2gep:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -282,14 +282,14 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_2gep(i16* %base, <8 x i16>* %
 ; CHECK-NEXT:    vmov.16 q0[7], lr
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %offs
-  %ptrs2 = getelementptr inbounds i16, <8 x i16*> %ptrs, i16 20
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %offs = load <8 x i16>, ptr %offptr, align 2
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i16> %offs
+  %ptrs2 = getelementptr inbounds i16, <8 x ptr> %ptrs, i16 20
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_2gep2(i16* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_2gep2(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_i16_2gep2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adr r1, .LCPI14_0
@@ -308,13 +308,13 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_2gep2(i16* %base, <8 x i16>*
 ; CHECK-NEXT:    .short 76 @ 0x4c
 ; CHECK-NEXT:    .short 82 @ 0x52
 entry:
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> <i16 0, i16 3, i16 6, i16 9, i16 12, i16 15, i16 18, i16 21>
-  %ptrs2 = getelementptr inbounds i16,<8 x i16*> %ptrs, i16 20
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i16> <i16 0, i16 3, i16 6, i16 9, i16 12, i16 15, i16 18, i16 21>
+  %ptrs2 = getelementptr inbounds i16,<8 x ptr> %ptrs, i16 20
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep(i16* %base) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep(ptr %base) {
 ; CHECK-LABEL: scaled_v8i16_i16_biggep:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adr r1, .LCPI15_0
@@ -333,13 +333,13 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep(i16* %base) {
 ; CHECK-NEXT:    .short 76 @ 0x4c
 ; CHECK-NEXT:    .short 82 @ 0x52
 entry:
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
-  %ptrs2 = getelementptr inbounds i16,<8 x i16*> %ptrs, i32 20
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
+  %ptrs2 = getelementptr inbounds i16,<8 x ptr> %ptrs, i32 20
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep2(i16* %base) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep2(ptr %base) {
 ; CHECK-LABEL: scaled_v8i16_i16_biggep2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adr r1, .LCPI16_0
@@ -358,12 +358,12 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep2(i16* %base) {
 ; CHECK-NEXT:    .short 18 @ 0x12
 ; CHECK-NEXT:    .short 21 @ 0x15
 entry:
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep3(i16* %base) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep3(ptr %base) {
 ; CHECK-LABEL: scaled_v8i16_i16_biggep3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -408,13 +408,13 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep3(i16* %base) {
 ; CHECK-NEXT:    .long 131084 @ 0x2000c
 ; CHECK-NEXT:    .long 131090 @ 0x20012
 entry:
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
-  %ptrs2 = getelementptr inbounds i16,<8 x i16*> %ptrs, i32 65536
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
+  %ptrs2 = getelementptr inbounds i16,<8 x ptr> %ptrs, i32 65536
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep4(i16* %base) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep4(ptr %base) {
 ; CHECK-LABEL: scaled_v8i16_i16_biggep4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -459,12 +459,12 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep4(i16* %base) {
 ; CHECK-NEXT:    .long 12 @ 0xc
 ; CHECK-NEXT:    .long 18 @ 0x12
 entry:
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 65536, i32 18, i32 21>
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 65536, i32 18, i32 21>
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep5(<8 x i16*> %base) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep5(<8 x ptr> %base) {
 ; CHECK-LABEL: scaled_v8i16_i16_biggep5:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -494,12 +494,12 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep5(<8 x i16*> %base) {
 ; CHECK-NEXT:    vmov.16 q0[7], lr
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %ptrs2 = getelementptr inbounds i16,<8 x i16*> %base, i32 65536
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs2 = getelementptr inbounds i16,<8 x ptr> %base, i32 65536
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep6(i16* %base) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep6(ptr %base) {
 ; CHECK-LABEL: scaled_v8i16_i16_biggep6:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -544,13 +544,13 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep6(i16* %base) {
 ; CHECK-NEXT:    .long 14 @ 0xe
 ; CHECK-NEXT:    .long 20 @ 0x14
 entry:
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 65536, i32 15, i32 18, i32 21>
-  %ptrs2 = getelementptr inbounds i16,<8 x i16*> %ptrs, i32 1
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 65536, i32 15, i32 18, i32 21>
+  %ptrs2 = getelementptr inbounds i16,<8 x ptr> %ptrs, i32 1
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep7(i16* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep7(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_i16_biggep7:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -595,13 +595,13 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_biggep7(i16* %base, <8 x i16>
 ; CHECK-NEXT:    .long 1212 @ 0x4bc
 ; CHECK-NEXT:    .long 1218 @ 0x4c2
 entry:
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> <i16 65000, i16 3, i16 6, i16 9, i16 12, i16 15, i16 18, i16 21>
-  %ptrs2 = getelementptr inbounds i16,<8 x i16*> %ptrs, i16 600
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i16> <i16 65000, i16 3, i16 6, i16 9, i16 12, i16 15, i16 18, i16 21>
+  %ptrs2 = getelementptr inbounds i16,<8 x ptr> %ptrs, i16 600
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_basei32(i32* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_basei32(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_i16_basei32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -634,14 +634,14 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_i16_basei32(i32* %base, <8 x i16>
 ; CHECK-NEXT:    vmov.16 q0[7], lr
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %offs.zext
-  %ptrs.cast = bitcast <8 x i32*> %ptrs to <8 x i16*>
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs.cast, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %offs.zext
+  %ptrs.cast = bitcast <8 x ptr> %ptrs to <8 x ptr>
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs.cast, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-declare <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*>, i32, <8 x i1>, <8 x i8>) #1
-declare <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*>, i32, <8 x i1>, <8 x i16>) #1
-declare <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*>, i32, <8 x i1>, <8 x half>) #1
+declare <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i8>) #1
+declare <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i16>) #1
+declare <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x half>) #1

diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-ind16-unscaled.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ind16-unscaled.ll
index 1b00aba367019..cee87b26ed279 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-ind16-unscaled.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ind16-unscaled.ll
@@ -1,22 +1,22 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - -opaque-pointers | FileCheck %s
 
-define arm_aapcs_vfpcc <8 x i16> @zext_unscaled_i8_i16(i8* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @zext_unscaled_i8_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_unscaled_i8_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vldrb.u16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
   %gather.zext = zext <8 x i8> %gather to <8 x i16>
   ret <8 x i16> %gather.zext
 }
 
-define arm_aapcs_vfpcc <8 x i16> @zext_unscaled_i8_i16_noext(i8* %base, <8 x i8>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @zext_unscaled_i8_i16_noext(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_unscaled_i8_i16_noext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -48,14 +48,14 @@ define arm_aapcs_vfpcc <8 x i16> @zext_unscaled_i8_i16_noext(i8* %base, <8 x i8>
 ; CHECK-NEXT:    vmovlb.u8 q0, q0
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 2
-  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i8> %offs
-  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %offs = load <8 x i8>, ptr %offptr, align 2
+  %ptrs = getelementptr inbounds i8, ptr %base, <8 x i8> %offs
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
   %gather.zext = zext <8 x i8> %gather to <8 x i16>
   ret <8 x i16> %gather.zext
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_sext(i16* %base, <8 x i8>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_sext(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_sext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -88,14 +88,14 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_sext(i16* %base, <8 x i8>* %offpt
 ; CHECK-NEXT:    vmov.16 q0[7], lr
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 2
+  %offs = load <8 x i8>, ptr %offptr, align 2
   %offs.sext = sext <8 x i8> %offs to <8 x i16>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %offs.sext
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i16> %offs.sext
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_zext(i16* %base, <8 x i8>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_zext(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_v8i16_zext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -128,118 +128,118 @@ define arm_aapcs_vfpcc <8 x i16> @scaled_v8i16_zext(i16* %base, <8 x i8>* %offpt
 ; CHECK-NEXT:    vmov.16 q0[7], lr
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 2
+  %offs = load <8 x i8>, ptr %offptr, align 2
   %offs.zext = zext <8 x i8> %offs to <8 x i16>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %offs.zext
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i16> %offs.zext
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @sext_unscaled_i8_i16(i8* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @sext_unscaled_i8_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_unscaled_i8_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vldrb.s16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
   %gather.sext = sext <8 x i8> %gather to <8 x i16>
   ret <8 x i16> %gather.sext
 }
 
-define arm_aapcs_vfpcc <8 x i16> @unscaled_i16_i16(i8* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @unscaled_i16_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x i16*>
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x half> @unscaled_f16_i16(i8* %base, <8 x i16>* %offptr) {
+define arm_aapcs_vfpcc <8 x half> @unscaled_f16_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_f16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x half*>
-  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
+  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
   ret <8 x half> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i16> @zext_unsigned_unscaled_i8_i8(i8* %base, <8 x i8>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @zext_unsigned_unscaled_i8_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_unsigned_unscaled_i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r1]
 ; CHECK-NEXT:    vldrb.u16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
   %gather.zext = zext <8 x i8> %gather to <8 x i16>
   ret <8 x i16> %gather.zext
 }
 
-define arm_aapcs_vfpcc <8 x i16> @sext_unsigned_unscaled_i8_i8(i8* %base, <8 x i8>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @sext_unsigned_unscaled_i8_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_unsigned_unscaled_i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r1]
 ; CHECK-NEXT:    vldrb.s16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
   %gather.sext = sext <8 x i8> %gather to <8 x i16>
   ret <8 x i16> %gather.sext
 }
 
-define arm_aapcs_vfpcc <8 x i16> @unsigned_unscaled_i16_i8(i8* %base, <8 x i8>* %offptr) {
+define arm_aapcs_vfpcc <8 x i16> @unsigned_unscaled_i16_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unsigned_unscaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r1]
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x i16*>
-  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
+  %gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   ret <8 x i16> %gather
 }
 
-define arm_aapcs_vfpcc <8 x half> @unsigned_unscaled_f16_i8(i8* %base, <8 x i8>* %offptr) {
+define arm_aapcs_vfpcc <8 x half> @unsigned_unscaled_f16_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unsigned_unscaled_f16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r1]
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x half*>
-  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
+  %gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
   ret <8 x half> %gather
 }
 
-declare <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*>, i32, <8 x i1>, <8 x i8>) #1
-declare <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*>, i32, <8 x i1>, <8 x i16>) #1
-declare <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*>, i32, <8 x i1>, <8 x half>) #1
+declare <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i8>) #1
+declare <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i16>) #1
+declare <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x half>) #1

diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll
index a7261265552f8..80136e18ea4a3 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll
@@ -2,300 +2,300 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -opaque-pointers %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc <4 x i32> @zext_scaled_i16_i32(i16* %base, <4 x i32>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_scaled_i16_i32(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_scaled_i16_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.zext = zext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_scaled_i16_i32(i16* %base, <4 x i32>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_scaled_i16_i32(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_scaled_i16_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.sext = sext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @scaled_i32_i32(i32* %base, <4 x i32>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @scaled_i32_i32(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_i32_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
 ; TODO: scaled_f16_i32
 
-define arm_aapcs_vfpcc <4 x float> @scaled_f32_i32(i32* %base, <4 x i32>* %offptr) {
+define arm_aapcs_vfpcc <4 x float> @scaled_f32_i32(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_f32_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs
-  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
-  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %i32_ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x ptr> %i32_ptrs to <4 x ptr>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
   ret <4 x float> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @unsigned_scaled_b_i32_i16(i32* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @unsigned_scaled_b_i32_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unsigned_scaled_b_i32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.zext
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @signed_scaled_i32_i16(i32* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @signed_scaled_i32_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: signed_scaled_i32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.sext
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.sext
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <4 x float> @a_unsigned_scaled_f32_i16(i32* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x float> @a_unsigned_scaled_f32_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: a_unsigned_scaled_f32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
-  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  %i32_ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %i32_ptrs to <4 x ptr>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
   ret <4 x float> %gather
 }
 
-define arm_aapcs_vfpcc <4 x float> @b_signed_scaled_f32_i16(i32* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x float> @b_signed_scaled_f32_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: b_signed_scaled_f32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
-  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  %i32_ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %i32_ptrs to <4 x ptr>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
   ret <4 x float> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_signed_scaled_i16_i16(i16* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_signed_scaled_i16_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_signed_scaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.sext
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs.sext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.zext = zext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_signed_scaled_i16_i16(i16* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_signed_scaled_i16_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_signed_scaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.sext
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs.sext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.sext = sext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_scaled_i16_i16(i16* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_scaled_i16_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_unsigned_scaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.zext
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs.zext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.zext = zext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_scaled_i16_i16(i16* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_scaled_i16_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_unsigned_scaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.zext
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs.zext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.sext = sext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @unsigned_scaled_b_i32_i8(i32* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @unsigned_scaled_b_i32_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unsigned_scaled_b_i32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.zext
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @signed_scaled_i32_i8(i32* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @signed_scaled_i32_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: signed_scaled_i32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.sext
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.sext
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <4 x float> @a_unsigned_scaled_f32_i8(i32* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x float> @a_unsigned_scaled_f32_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: a_unsigned_scaled_f32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
-  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  %i32_ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %i32_ptrs to <4 x ptr>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
   ret <4 x float> %gather
 }
 
-define arm_aapcs_vfpcc <4 x float> @b_signed_scaled_f32_i8(i32* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x float> @b_signed_scaled_f32_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: b_signed_scaled_f32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
-  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  %i32_ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %i32_ptrs to <4 x ptr>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
   ret <4 x float> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_signed_scaled_i16_i8(i16* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_signed_scaled_i16_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_signed_scaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.sext
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs.sext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.zext = zext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_signed_scaled_i16_i8(i16* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_signed_scaled_i16_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_signed_scaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.sext
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs.sext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.sext = sext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_scaled_i16_i8(i16* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_scaled_i16_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_unsigned_scaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.zext
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs.zext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.zext = zext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_scaled_i16_i8(i16* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_scaled_i16_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_unsigned_scaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.zext
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs.zext
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.sext = sext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @scaled_i32_i32_2gep(i32* %base, <4 x i32>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @scaled_i32_i32_2gep(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: scaled_i32_i32_2gep:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -306,14 +306,14 @@ define arm_aapcs_vfpcc <4 x i32> @scaled_i32_i32_2gep(i32* %base, <4 x i32>* %offptr) {
 ; CHECK-NEXT:    vldrw.u32 q0, [q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs
-  %ptrs2 = getelementptr inbounds i32, <4 x i32*> %ptrs, i32 5
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs
+  %ptrs2 = getelementptr inbounds i32, <4 x ptr> %ptrs, i32 5
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @scaled_i32_i32_2gep2(i32* %base) {
+define arm_aapcs_vfpcc <4 x i32> @scaled_i32_i32_2gep2(ptr %base) {
 ; CHECK-LABEL: scaled_i32_i32_2gep2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adr r1, .LCPI21_0
@@ -328,14 +328,14 @@ define arm_aapcs_vfpcc <4 x i32> @scaled_i32_i32_2gep2(i32* %base) {
 ; CHECK-NEXT:    .long 44 @ 0x2c
 ; CHECK-NEXT:    .long 56 @ 0x38
 entry:
-  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
-  %ptrs2 = getelementptr inbounds i32, <4 x i32*> %ptrs, i32 5
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+  %ptrs2 = getelementptr inbounds i32, <4 x ptr> %ptrs, i32 5
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-declare <4 x i8>  @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>)
-declare <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>)
-declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
-declare <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*>, i32, <4 x i1>, <4 x half>)
-declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*>, i32, <4 x i1>, <4 x float>)
+declare <4 x i8>  @llvm.masked.gather.v4i8.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i8>)
+declare <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i16>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)
+declare <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x half>)
+declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>)

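A note on the declaration renames in the file above: masked.gather is an
overloaded intrinsic, so its pointer operand type is mangled into the name
suffix. With typed pointers the suffix spelled out the pointee type; with
opaque pointers only the address space is left, so "v4p0i32" collapses to
"v4p0". A minimal before/after sketch of the rename:

    ; typed pointers: v4p0i32 = <4 x i32*> in addrspace(0)
    declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)

    ; opaque pointers: v4p0 = <4 x ptr> in addrspace(0)
    declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)
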
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll
index ebe6403004049..58283012dc7eb 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll
@@ -2,462 +2,462 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -opaque-pointers %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc <4 x i32> @zext_unscaled_i8_i32(i8* %base, <4 x i32>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_unscaled_i8_i32(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_unscaled_i8_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
-  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   %gather.zext = zext <4 x i8> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_unscaled_i8_i32(i8* %base, <4 x i32>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_unscaled_i8_i32(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_unscaled_i8_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vldrb.s32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
-  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   %gather.sext = sext <4 x i8> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_unscaled_i16_i32(i8* %base, <4 x i32>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_unscaled_i16_i32(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_unscaled_i16_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.zext = zext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_unscaled_i16_i32(i8* %base, <4 x i32>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_unscaled_i16_i32(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_unscaled_i16_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.sext = sext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @unscaled_i32_i32(i8* %base, <4 x i32>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @unscaled_i32_i32(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_i32_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <4 x float> @unscaled_f32_i32(i8* %base, <4 x i32>* %offptr) {
+define arm_aapcs_vfpcc <4 x float> @unscaled_f32_i32(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_f32_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
-  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
   ret <4 x float> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @unsigned_unscaled_b_i32_i16(i8* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @unsigned_unscaled_b_i32_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unsigned_unscaled_b_i32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @signed_unscaled_i32_i16(i8* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @signed_unscaled_i32_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: signed_unscaled_i32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <4 x float> @a_unsigned_unscaled_f32_i16(i8* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x float> @a_unsigned_unscaled_f32_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: a_unsigned_unscaled_f32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
-  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
   ret <4 x float> %gather
 }
 
-define arm_aapcs_vfpcc <4 x float> @b_signed_unscaled_f32_i16(i8* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x float> @b_signed_unscaled_f32_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: b_signed_unscaled_f32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
-  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
   ret <4 x float> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_signed_unscaled_i16_i16(i8* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_signed_unscaled_i16_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_signed_unscaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.zext = zext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_signed_unscaled_i16_i16(i8* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_signed_unscaled_i16_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_signed_unscaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.sext = sext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_unscaled_i16_i16(i8* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_unscaled_i16_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_unsigned_unscaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.zext = zext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_unscaled_i16_i16(i8* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_unscaled_i16_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_unsigned_unscaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.sext = sext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_signed_unscaled_i8_i16(i8* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_signed_unscaled_i8_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_signed_unscaled_i8_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   %gather.zext = zext <4 x i8> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_signed_unscaled_i8_i16(i8* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_signed_unscaled_i8_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_signed_unscaled_i8_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vldrb.s32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   %gather.sext = sext <4 x i8> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_unscaled_i8_i16(i8* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_unscaled_i8_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_unsigned_unscaled_i8_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   %gather.zext = zext <4 x i8> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_unscaled_i8_i16(i8* %base, <4 x i16>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_unscaled_i8_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_unsigned_unscaled_i8_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vldrb.s32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   %gather.sext = sext <4 x i8> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @unsigned_unscaled_b_i32_i8(i8* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @unsigned_unscaled_b_i32_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unsigned_unscaled_b_i32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @signed_unscaled_i32_i8(i8* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @signed_unscaled_i32_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: signed_unscaled_i32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <4 x float> @a_unsigned_unscaled_f32_i8(i8* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x float> @a_unsigned_unscaled_f32_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: a_unsigned_unscaled_f32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
-  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
   ret <4 x float> %gather
 }
 
-define arm_aapcs_vfpcc <4 x float> @b_signed_unscaled_f32_i8(i8* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x float> @b_signed_unscaled_f32_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: b_signed_unscaled_f32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
-  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
   ret <4 x float> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_signed_unscaled_i16_i8(i8* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_signed_unscaled_i16_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_signed_unscaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.zext = zext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_signed_unscaled_i16_i8(i8* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_signed_unscaled_i16_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_signed_unscaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.sext = sext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_unscaled_i16_i8(i8* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_unscaled_i16_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_unsigned_unscaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.zext = zext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_unscaled_i16_i8(i8* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_unscaled_i16_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_unsigned_unscaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
-  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
   %gather.sext = sext <4 x i16> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_signed_unscaled_i8_i8(i8* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_signed_unscaled_i8_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_signed_unscaled_i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   %gather.zext = zext <4 x i8> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_signed_unscaled_i8_i8(i8* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_signed_unscaled_i8_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_signed_unscaled_i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vldrb.s32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   %gather.sext = sext <4 x i8> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_unscaled_i8_i8(i8* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @zext_unsigned_unscaled_i8_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: zext_unsigned_unscaled_i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   %gather.zext = zext <4 x i8> %gather to <4 x i32>
   ret <4 x i32> %gather.zext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_unscaled_i8_i8(i8* %base, <4 x i8>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_unscaled_i8_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: sext_unsigned_unscaled_i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vldrb.s32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   %gather.sext = sext <4 x i8> %gather to <4 x i32>
   ret <4 x i32> %gather.sext
 }
 
 ; VLDRW.u32 Qd, [P, 4]
-define arm_aapcs_vfpcc <4 x i32> @qi4(<4 x i32*> %p) {
+define arm_aapcs_vfpcc <4 x i32> @qi4(<4 x ptr> %p) {
 ; CHECK-LABEL: qi4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    movs r0, #16
@@ -465,12 +465,12 @@ define arm_aapcs_vfpcc <4 x i32> @qi4(<4 x i32*> %p) {
 ; CHECK-NEXT:    vldrw.u32 q0, [q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %g = getelementptr inbounds i32, <4 x i32*> %p, i32 4
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %g, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %g = getelementptr inbounds i32, <4 x ptr> %p, i32 4
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %g, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @qi4_unaligned(<4 x i32*> %p) {
+define arm_aapcs_vfpcc <4 x i32> @qi4_unaligned(<4 x ptr> %p) {
 ; CHECK-LABEL: qi4_unaligned:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    movs r0, #16
@@ -485,13 +485,13 @@ define arm_aapcs_vfpcc <4 x i32> @qi4_unaligned(<4 x i32*> %p) {
 ; CHECK-NEXT:    vmov q0[3], q0[1], r3, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %g = getelementptr inbounds i32, <4 x i32*> %p, i32 4
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %g, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %g = getelementptr inbounds i32, <4 x ptr> %p, i32 4
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %g, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-declare <4 x i8>  @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>)
-declare <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>)
-declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
-declare <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*>, i32, <4 x i1>, <4 x half>)
-declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*>, i32, <4 x i1>, <4 x float>)
+declare <4 x i8>  @llvm.masked.gather.v4i8.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i8>)
+declare <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i16>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)
+declare <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x half>)
+declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>)

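Equally mechanical are the bitcasts in the file above: under typed pointers
they converted between pointee types (<4 x i8*> to <4 x i16*> and similar),
and the conversion rewrites both sides to <4 x ptr>, leaving an identity
cast behind. For example, from sext_unscaled_i16_i32:

    %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
    %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr> ; now a no-op

Identity bitcasts are valid IR, so the tests behave exactly as before; the
now-redundant casts could be dropped in a separate cleanup.
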
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-ind8-unscaled.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ind8-unscaled.ll
index 7b18450cd2ac5..cb696f87e169c 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-ind8-unscaled.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ind8-unscaled.ll
@@ -1,35 +1,35 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - -opaque-pointers | FileCheck %s
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8(i8* %base, <16 x i8>* %offptr) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v16i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r1]
 ; CHECK-NEXT:    vldrb.u8 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <16 x i8>, <16 x i8>* %offptr, align 1
+  %offs = load <16 x i8>, ptr %offptr, align 1
   %offs.zext = zext <16 x i8> %offs to <16 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs.zext
-  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> %offs.zext
+  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   ret <16 x i8> %gather
 }
 
-define arm_aapcs_vfpcc <8 x i8> @unscaled_v8i8_i8(i8* %base, <8 x i8>* %offptr) {
+define arm_aapcs_vfpcc <8 x i8> @unscaled_v8i8_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v8i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r1]
 ; CHECK-NEXT:    vldrb.u16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %gather = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i8> undef)
   ret <8 x i8> %gather
 }
 
-define arm_aapcs_vfpcc <2 x i8> @unscaled_v2i8_i8(i8* %base, <2 x i8>* %offptr) {
+define arm_aapcs_vfpcc <2 x i8> @unscaled_v2i8_i8(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v2i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrb r2, [r1]
@@ -44,14 +44,14 @@ define arm_aapcs_vfpcc <2 x i8> @unscaled_v2i8_i8(i8* %base, <2 x i8>* %offptr) {
 ; CHECK-NEXT:    vmov q0[2], q0[0], r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <2 x i8>, <2 x i8>* %offptr, align 1
+  %offs = load <2 x i8>, ptr %offptr, align 1
   %offs.zext = zext <2 x i8> %offs to <2 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <2 x i32> %offs.zext
-  %gather = call <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <2 x i32> %offs.zext
+  %gather = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> %ptrs, i32 1, <2 x i1> <i1 true, i1 true>, <2 x i8> undef)
   ret <2 x i8> %gather
 }
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_sext(i8* %base, <16 x i8>* %offptr) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_sext(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v16i8_sext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -106,14 +106,14 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_sext(i8* %base, <16 x i8>* %offptr) {
 ; CHECK-NEXT:    vmov.8 q0[15], lr
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <16 x i8>, <16 x i8>* %offptr, align 1
+  %offs = load <16 x i8>, ptr %offptr, align 1
   %offs.sext = sext <16 x i8> %offs to <16 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs.sext
-  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> %offs.sext
+  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   ret <16 x i8> %gather
 }
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i16(i8* %base, <16 x i16>* %offptr) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v16i8_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -168,14 +168,14 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i16(i8* %base, <16 x i16>* %offptr) {
 ; CHECK-NEXT:    vmov.8 q0[15], lr
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <16 x i16>, <16 x i16>* %offptr, align 2
+  %offs = load <16 x i16>, ptr %offptr, align 2
   %offs.sext = sext <16 x i16> %offs to <16 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs.sext
-  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> %offs.sext
+  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   ret <16 x i8> %gather
 }
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_scaled(i32* %base, <16 x i8>* %offptr) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_scaled(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v16i8_scaled:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -234,15 +234,15 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_scaled(i32* %base, <16 x i8>* %offptr) {
 ; CHECK-NEXT:    vmov.8 q0[15], lr
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <16 x i8>, <16 x i8>* %offptr, align 4
+  %offs = load <16 x i8>, ptr %offptr, align 4
   %offs.zext = zext <16 x i8> %offs to <16 x i32>
-  %ptrs32 = getelementptr inbounds i32, i32* %base, <16 x i32> %offs.zext
-  %ptrs = bitcast <16 x i32*> %ptrs32 to <16 x i8*>
-  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %ptrs32 = getelementptr inbounds i32, ptr %base, <16 x i32> %offs.zext
+  %ptrs = bitcast <16 x ptr> %ptrs32 to <16 x ptr>
+  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   ret <16 x i8> %gather
 }
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_next(i8* %base, <16 x i32>* %offptr) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_next(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v16i8_i8_next:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -297,13 +297,13 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_next(i8* %base, <16 x i32>* %offptr) {
 ; CHECK-NEXT:    vmov.8 q0[15], lr
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <16 x i32>, <16 x i32>* %offptr, align 4
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs
-  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %offs = load <16 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> %offs
+  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   ret <16 x i8> %gather
 }
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_2gep(i8* %base, <16 x i8>* %offptr) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_2gep(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v16i8_i8_2gep:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
@@ -363,15 +363,15 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_2gep(i8* %base, <16 x i8>* %offptr) {
 ; CHECK-NEXT:    vmov.8 q0[15], lr
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 entry:
-	%offs = load <16 x i8>, <16 x i8>* %offptr, align 1
-	%ptrs = getelementptr inbounds i8, i8* %base, <16 x i8> %offs
-	%ptrs2 = getelementptr inbounds i8, <16 x i8*> %ptrs, i8 5
-	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+	%offs = load <16 x i8>, ptr %offptr, align 1
+	%ptrs = getelementptr inbounds i8, ptr %base, <16 x i8> %offs
+	%ptrs2 = getelementptr inbounds i8, <16 x ptr> %ptrs, i8 5
+	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
 	ret <16 x i8> %gather
 }
 
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_2gep2(i8* %base, <16 x i8>* %offptr) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_2gep2(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v16i8_i8_2gep2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adr r1, .LCPI8_0
@@ -398,14 +398,14 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_2gep2(i8* %base, <16 x i8>* %offptr) {
 ; CHECK-NEXT:    .byte 47 @ 0x2f
 ; CHECK-NEXT:    .byte 50 @ 0x32
 entry:
-	%ptrs = getelementptr inbounds i8, i8* %base, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
-	%ptrs2 = getelementptr inbounds i8, <16 x i8*> %ptrs, i8 5
-	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+	%ptrs = getelementptr inbounds i8, ptr %base, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
+	%ptrs2 = getelementptr inbounds i8, <16 x ptr> %ptrs, i8 5
+	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
 	ret <16 x i8> %gather
 }
 
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep(i8* %base) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep(ptr %base) {
 ; CHECK-LABEL: unscaled_v16i8_i8_biggep:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adr r1, .LCPI9_0
@@ -432,14 +432,14 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep(i8* %base) {
 ; CHECK-NEXT:    .byte 47 @ 0x2f
 ; CHECK-NEXT:    .byte 50 @ 0x32
 entry:
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
-	%ptrs2 = getelementptr inbounds i8, <16 x i8*> %ptrs, i32 5
-	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
+	%ptrs2 = getelementptr inbounds i8, <16 x ptr> %ptrs, i32 5
+	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
 	ret <16 x i8> %gather
 }
 
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep2(i8* %base) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep2(ptr %base) {
 ; CHECK-LABEL: unscaled_v16i8_i8_biggep2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adr r1, .LCPI10_0
@@ -466,13 +466,13 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep2(i8* %base) {
 ; CHECK-NEXT:    .byte 42 @ 0x2a
 ; CHECK-NEXT:    .byte 45 @ 0x2d
 entry:
-	%ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
-	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+	%ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
+	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
 	ret <16 x i8> %gather
 }
 
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep3(i8* %base) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep3(ptr %base) {
 ; CHECK-LABEL: unscaled_v16i8_i8_biggep3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -553,14 +553,14 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep3(i8* %base) {
 ; CHECK-NEXT:    .long 274 @ 0x112
 ; CHECK-NEXT:    .long 277 @ 0x115
 entry:
-	%ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
-	%ptrs2 = getelementptr inbounds i8, <16 x i8*> %ptrs, i32 256
-	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+	%ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
+	%ptrs2 = getelementptr inbounds i8, <16 x ptr> %ptrs, i32 256
+	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
 	ret <16 x i8> %gather
 }
 
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep4(i8* %base) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep4(ptr %base) {
 ; CHECK-LABEL: unscaled_v16i8_i8_biggep4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -641,13 +641,13 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep4(i8* %base) {
 ; CHECK-NEXT:    .long 18 @ 0x12
 ; CHECK-NEXT:    .long 21 @ 0x15
 entry:
-	%ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 256, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
-	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+	%ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 256, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
+	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
 	ret <16 x i8> %gather
 }
 
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep5(<16 x i8*> %base) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep5(<16 x ptr> %base) {
 ; CHECK-LABEL: unscaled_v16i8_i8_biggep5:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -699,13 +699,13 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep5(<16 x i8*> %base) {
 ; CHECK-NEXT:    vmov.8 q0[15], r12
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-	%ptrs2 = getelementptr inbounds i8, <16 x i8*> %base, i32 256
-  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+	%ptrs2 = getelementptr inbounds i8, <16 x ptr> %base, i32 256
+  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
 	ret <16 x i8> %gather
 }
 
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep6(i8* %base) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep6(ptr %base) {
 ; CHECK-LABEL: unscaled_v16i8_i8_biggep6:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -786,14 +786,14 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep6(i8* %base) {
 ; CHECK-NEXT:    .long 19 @ 0x13
 ; CHECK-NEXT:    .long 22 @ 0x16
 entry:
-	%ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 256, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
-	%ptrs2 = getelementptr inbounds i8, <16 x i8*> %ptrs, i32 1
-	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+	%ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 256, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
+	%ptrs2 = getelementptr inbounds i8, <16 x ptr> %ptrs, i32 1
+	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
 	ret <16 x i8> %gather
 }
 
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep7(i8* %base) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep7(ptr %base) {
 ; CHECK-LABEL: unscaled_v16i8_i8_biggep7:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -874,14 +874,14 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_biggep7(i8* %base) {
 ; CHECK-NEXT:    .long 218 @ 0xda
 ; CHECK-NEXT:    .long 221 @ 0xdd
 entry:
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> <i32 100, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
-	%ptrs2 = getelementptr inbounds i8, <16 x i8*> %ptrs, i32 200
-	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> <i32 100, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
+	%ptrs2 = getelementptr inbounds i8, <16 x ptr> %ptrs, i32 200
+	%gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
 	ret <16 x i8> %gather
 }
 
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_2(i8* %base, <16 x i8>* %offptr) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_2(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v16i8_i8_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -936,14 +936,14 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_2(i8* %base, <16 x i8>* %off
 ; CHECK-NEXT:    vmov.8 q0[15], lr
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <16 x i8>, <16 x i8>* %offptr, align 1
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i8> %offs
-  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %offs = load <16 x i8>, ptr %offptr, align 1
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i8> %offs
+  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
 	ret <16 x i8> %gather
 }
 
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_3(i8* %base, <16 x i8>* %offptr) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_3(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v16i8_i8_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adr r1, .LCPI17_0
@@ -970,12 +970,12 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_3(i8* %base, <16 x i8>* %off
 ; CHECK-NEXT:    .byte 42 @ 0x2a
 ; CHECK-NEXT:    .byte 45 @ 0x2d
 entry:
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
-  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
+  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
 	ret <16 x i8> %gather
 }
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_basei16(i16* %base, <16 x i8>* %offptr) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_basei16(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v16i8_basei16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -1034,15 +1034,15 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_basei16(i16* %base, <16 x i8>*
 ; CHECK-NEXT:    vmov.8 q0[15], lr
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <16 x i8>, <16 x i8>* %offptr, align 1
+  %offs = load <16 x i8>, ptr %offptr, align 1
   %offs.zext = zext <16 x i8> %offs to <16 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <16 x i32> %offs.zext
-  %ptrs.cast = bitcast <16 x i16*> %ptrs to <16 x i8*>
-  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs.cast, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %ptrs = getelementptr inbounds i16, ptr %base, <16 x i32> %offs.zext
+  %ptrs.cast = bitcast <16 x ptr> %ptrs to <16 x ptr>
+  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs.cast, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   ret <16 x i8> %gather
 }
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_range(i8* %base, <16 x i8>* %offptr) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_range(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v16i8_i8_range:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adr r1, .LCPI19_0
@@ -1069,13 +1069,13 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_range(i8* %base, <16 x i8>*
 ; CHECK-NEXT:    .byte 124 @ 0x7c
 ; CHECK-NEXT:    .byte 127 @ 0x7f
 entry:
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
-  %ptrs2 = getelementptr inbounds i8, <16 x i8*> %ptrs, i32 82
-  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
+  %ptrs2 = getelementptr inbounds i8, <16 x ptr> %ptrs, i32 82
+  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
 	ret <16 x i8> %gather
 }
 
-define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_rangebad(i8* %base, <16 x i8>* %offptr) {
+define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_rangebad(ptr %base, ptr %offptr) {
 ; CHECK-LABEL: unscaled_v16i8_i8_rangebad:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -1156,12 +1156,12 @@ define arm_aapcs_vfpcc <16 x i8> @unscaled_v16i8_i8_rangebad(i8* %base, <16 x i8
 ; CHECK-NEXT:    .long 101 @ 0x65
 ; CHECK-NEXT:    .long 104 @ 0x68
 entry:
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
-  %ptrs2 = getelementptr inbounds i8, <16 x i8*> %ptrs, i32 83
-  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
+  %ptrs2 = getelementptr inbounds i8, <16 x ptr> %ptrs, i32 83
+  %gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
 	ret <16 x i8> %gather
 }
 
-declare <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>)
-declare <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*>, i32, <8 x i1>, <8 x i8>)
-declare <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*>, i32, <2 x i1>, <2 x i8>)
+declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i8>)
+declare <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i8>)
+declare <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i8>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll
index 7e72edd360993..8013e1d639715 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedgatscat=false %s -o - | FileCheck --check-prefix NOGATSCAT %s
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=-mve %s -o - | FileCheck --check-prefix NOMVE %s
 
-define arm_aapcs_vfpcc <4 x i32> @unscaled_i32_i32_gather(i8* %base, <4 x i32>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @unscaled_i32_i32_gather(ptr %base, ptr %offptr) {
 ; NOGATSCAT-LABEL: unscaled_i32_i32_gather:
 ; NOGATSCAT:       @ %bb.0: @ %entry
 ; NOGATSCAT-NEXT:    vldrw.u32 q0, [r1]
@@ -31,17 +31,17 @@ define arm_aapcs_vfpcc <4 x i32> @unscaled_i32_i32_gather(i8* %base, <4 x i32>*
 ; NOMVE-NEXT:    pop {r4, pc}
 
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret <4 x i32> %gather
 }
 
-declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)
 
 
-define arm_aapcs_vfpcc void @unscaled_i32_i8_scatter(i8* %base, <4 x i8>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @unscaled_i32_i8_scatter(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; NOGATSCAT-LABEL: unscaled_i32_i8_scatter:
 ; NOGATSCAT:       @ %bb.0: @ %entry
 ; NOGATSCAT-NEXT:    .save {r4, r5, r7, lr}
@@ -75,12 +75,12 @@ define arm_aapcs_vfpcc void @unscaled_i32_i8_scatter(i8* %base, <4 x i8>* %offpt
 ; NOMVE-NEXT:    pop {r4, pc}
 
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32, <4 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-ptr-address.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-ptr-address.ll
index 96a0b53322115..1bf8590152b2a 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-ptr-address.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-ptr-address.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - -opaque-pointers | FileCheck %s
 
-define void @ptr_iv_v4i32(i32* noalias nocapture readonly %A, i32* noalias nocapture %B, i32 %y) {
+define void @ptr_iv_v4i32(ptr noalias nocapture readonly %A, ptr noalias nocapture %B, i32 %y) {
 ; CHECK-LABEL: ptr_iv_v4i32:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r7, lr}
@@ -32,16 +32,16 @@ vector.ph:
   br label %vector.body
 
 vector.body:
-  %pointer.phi = phi i32* [ %A, %vector.ph ], [ %0, %vector.body ]
-  %pointer.phi13 = phi i32* [ %B, %vector.ph ], [ %2, %vector.body ]
+  %pointer.phi = phi ptr [ %A, %vector.ph ], [ %0, %vector.body ]
+  %pointer.phi13 = phi ptr [ %B, %vector.ph ], [ %2, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr i32, i32* %pointer.phi, i32 16
-  %1 = getelementptr i32, i32* %pointer.phi, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
-  %2 = getelementptr i32, i32* %pointer.phi13, i32 16
-  %3 = getelementptr i32, i32* %pointer.phi13, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %0 = getelementptr i32, ptr %pointer.phi, i32 16
+  %1 = getelementptr i32, ptr %pointer.phi, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %2 = getelementptr i32, ptr %pointer.phi13, i32 16
+  %3 = getelementptr i32, ptr %pointer.phi13, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   %4 = add nsw <4 x i32> %wide.masked.gather, %broadcast.splat
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %4, <4 x i32*> %3, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %4, <4 x ptr> %3, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %5 = icmp eq i32 %index.next, 996
   br i1 %5, label %end, label %vector.body
@@ -50,7 +50,7 @@ end:
   ret void
 }
 
-define void @ptr_iv_v4i32_mult(i32* noalias nocapture readonly %A, i32* noalias nocapture %B, i32 %y) {
+define void @ptr_iv_v4i32_mult(ptr noalias nocapture readonly %A, ptr noalias nocapture %B, i32 %y) {
 ; CHECK-LABEL: ptr_iv_v4i32_mult:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r7, lr}
@@ -87,18 +87,18 @@ vector.ph:
   br label %vector.body
 
 vector.body:
-  %pointer.phi = phi i32* [ %A, %vector.ph ], [ %0, %vector.body ]
-  %pointer.phi13 = phi i32* [ %B, %vector.ph ], [ %2, %vector.body ]
+  %pointer.phi = phi ptr [ %A, %vector.ph ], [ %0, %vector.body ]
+  %pointer.phi13 = phi ptr [ %B, %vector.ph ], [ %2, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr i32, i32* %pointer.phi, i32 16
-  %1 = getelementptr i32, i32* %pointer.phi, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
-  %gather.address = getelementptr i32, <4 x i32*> %1, i32 3
-  %2 = getelementptr i32, i32* %pointer.phi13, i32 16
-  %3 = getelementptr i32, i32* %pointer.phi13, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
-  %scatter.address = getelementptr i32, <4 x i32*> %1, i32 5
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %gather.address, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %0 = getelementptr i32, ptr %pointer.phi, i32 16
+  %1 = getelementptr i32, ptr %pointer.phi, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %gather.address = getelementptr i32, <4 x ptr> %1, i32 3
+  %2 = getelementptr i32, ptr %pointer.phi13, i32 16
+  %3 = getelementptr i32, ptr %pointer.phi13, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %scatter.address = getelementptr i32, <4 x ptr> %1, i32 5
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %gather.address, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   %4 = add nsw <4 x i32> %wide.masked.gather, %broadcast.splat
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %4, <4 x i32*> %scatter.address, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %4, <4 x ptr> %scatter.address, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %5 = icmp eq i32 %index.next, 996
   br i1 %5, label %end, label %vector.body
@@ -107,7 +107,7 @@ end:
   ret void
 }
 
-define void @ptr_iv_v8i16(i16* noalias nocapture readonly %A, i16* noalias nocapture %B, i16 %y) {
+define void @ptr_iv_v8i16(ptr noalias nocapture readonly %A, ptr noalias nocapture %B, i16 %y) {
 ; CHECK-LABEL: ptr_iv_v8i16:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r7, lr}
@@ -142,16 +142,16 @@ vector.ph:
   br label %vector.body
 
 vector.body:
-  %pointer.phi = phi i16* [ %A, %vector.ph ], [ %0, %vector.body ]
-  %pointer.phi13 = phi i16* [ %B, %vector.ph ], [ %2, %vector.body ]
+  %pointer.phi = phi ptr [ %A, %vector.ph ], [ %0, %vector.body ]
+  %pointer.phi13 = phi ptr [ %B, %vector.ph ], [ %2, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr i16, i16* %pointer.phi, i32 32
-  %1 = getelementptr i16, i16* %pointer.phi, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
-  %2 = getelementptr i16, i16* %pointer.phi13, i32 32
-  %3 = getelementptr i16, i16* %pointer.phi13, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
-  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %0 = getelementptr i16, ptr %pointer.phi, i32 32
+  %1 = getelementptr i16, ptr %pointer.phi, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
+  %2 = getelementptr i16, ptr %pointer.phi13, i32 32
+  %3 = getelementptr i16, ptr %pointer.phi13, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
+  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   %4 = add nsw <8 x i16> %wide.masked.gather, %broadcast.splat
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %4, <8 x i16*> %3, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %4, <8 x ptr> %3, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %5 = icmp eq i32 %index.next, 996
   br i1 %5, label %end, label %vector.body
@@ -161,7 +161,7 @@ end:
 }
 
 
-define void @ptr_iv_v8i16_mult(i16* noalias nocapture readonly %A, i16* noalias nocapture %B, i16 %y) {
+define void @ptr_iv_v8i16_mult(ptr noalias nocapture readonly %A, ptr noalias nocapture %B, i16 %y) {
 ; CHECK-LABEL: ptr_iv_v8i16_mult:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r7, lr}
@@ -207,18 +207,18 @@ vector.ph:
   br label %vector.body
 
 vector.body:
-  %pointer.phi = phi i16* [ %A, %vector.ph ], [ %0, %vector.body ]
-  %pointer.phi13 = phi i16* [ %B, %vector.ph ], [ %2, %vector.body ]
+  %pointer.phi = phi ptr [ %A, %vector.ph ], [ %0, %vector.body ]
+  %pointer.phi13 = phi ptr [ %B, %vector.ph ], [ %2, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr i16, i16* %pointer.phi, i32 32
-  %1 = getelementptr i16, i16* %pointer.phi, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
-  %gather.address = getelementptr i16, <8 x i16*> %1, i16 3
-  %2 = getelementptr i16, i16* %pointer.phi13, i32 32
-  %3 = getelementptr i16, i16* %pointer.phi13, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
-  %scatter.address = getelementptr i16, <8 x i16*> %3, i16 5
-  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %gather.address, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %0 = getelementptr i16, ptr %pointer.phi, i32 32
+  %1 = getelementptr i16, ptr %pointer.phi, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
+  %gather.address = getelementptr i16, <8 x ptr> %1, i16 3
+  %2 = getelementptr i16, ptr %pointer.phi13, i32 32
+  %3 = getelementptr i16, ptr %pointer.phi13, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
+  %scatter.address = getelementptr i16, <8 x ptr> %3, i16 5
+  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %gather.address, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   %4 = add nsw <8 x i16> %wide.masked.gather, %broadcast.splat
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %4, <8 x i16*> %scatter.address, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %4, <8 x ptr> %scatter.address, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %5 = icmp eq i32 %index.next, 996
   br i1 %5, label %end, label %vector.body
@@ -227,7 +227,7 @@ end:
   ret void
 }
 
-define void @ptr_iv_v16i8(i8* noalias nocapture readonly %A, i8* noalias nocapture %B, i8 %y) {
+define void @ptr_iv_v16i8(ptr noalias nocapture readonly %A, ptr noalias nocapture %B, i8 %y) {
 ; CHECK-LABEL: ptr_iv_v16i8:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r7, lr}
@@ -270,16 +270,16 @@ vector.ph:                                        ; preds = %entry
   br label %vector.body
 
 vector.body:
-  %pointer.phi = phi i8* [ %A, %vector.ph ], [ %0, %vector.body ]
-  %pointer.phi13 = phi i8* [ %B, %vector.ph ], [ %2, %vector.body ]
+  %pointer.phi = phi ptr [ %A, %vector.ph ], [ %0, %vector.body ]
+  %pointer.phi13 = phi ptr [ %B, %vector.ph ], [ %2, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr i8, i8* %pointer.phi, i32 64
-  %1 = getelementptr i8, i8* %pointer.phi, <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 32, i8 36, i8 40, i8 44, i8 48, i8 52, i8 56, i8 60>
-  %2 = getelementptr i8, i8* %pointer.phi13, i32 64
-  %3 = getelementptr i8, i8* %pointer.phi13, <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 32, i8 36, i8 40, i8 44, i8 48, i8 52, i8 56, i8 60>
-  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %1, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %0 = getelementptr i8, ptr %pointer.phi, i32 64
+  %1 = getelementptr i8, ptr %pointer.phi, <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 32, i8 36, i8 40, i8 44, i8 48, i8 52, i8 56, i8 60>
+  %2 = getelementptr i8, ptr %pointer.phi13, i32 64
+  %3 = getelementptr i8, ptr %pointer.phi13, <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 32, i8 36, i8 40, i8 44, i8 48, i8 52, i8 56, i8 60>
+  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %1, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   %4 = add nsw <16 x i8> %wide.masked.gather, %broadcast.splat
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %4, <16 x i8*> %3, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %4, <16 x ptr> %3, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %5 = icmp eq i32 %index.next, 996
   br i1 %5, label %end, label %vector.body
@@ -289,7 +289,7 @@ end:
 }
 
 
-define void @ptr_iv_v16i8_mult(i8* noalias nocapture readonly %A, i8* noalias nocapture %B, i8 %y) {
+define void @ptr_iv_v16i8_mult(ptr noalias nocapture readonly %A, ptr noalias nocapture %B, i8 %y) {
 ; CHECK-LABEL: ptr_iv_v16i8_mult:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r7, lr}
@@ -351,18 +351,18 @@ vector.ph:                                        ; preds = %entry
   br label %vector.body
 
 vector.body:
-  %pointer.phi = phi i8* [ %A, %vector.ph ], [ %0, %vector.body ]
-  %pointer.phi13 = phi i8* [ %B, %vector.ph ], [ %2, %vector.body ]
+  %pointer.phi = phi ptr [ %A, %vector.ph ], [ %0, %vector.body ]
+  %pointer.phi13 = phi ptr [ %B, %vector.ph ], [ %2, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr i8, i8* %pointer.phi, i32 64
-  %1 = getelementptr i8, i8* %pointer.phi, <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 32, i8 36, i8 40, i8 44, i8 48, i8 52, i8 56, i8 60>
-  %gather.address = getelementptr i8, <16 x i8*> %1, i8 3
-  %2 = getelementptr i8, i8* %pointer.phi13, i32 64
-  %3 = getelementptr i8, i8* %pointer.phi13, <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 32, i8 36, i8 40, i8 44, i8 48, i8 52, i8 56, i8 60>
-  %scatter.address = getelementptr i8, <16 x i8*> %3, i8 5
-  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %gather.address, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %0 = getelementptr i8, ptr %pointer.phi, i32 64
+  %1 = getelementptr i8, ptr %pointer.phi, <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 32, i8 36, i8 40, i8 44, i8 48, i8 52, i8 56, i8 60>
+  %gather.address = getelementptr i8, <16 x ptr> %1, i8 3
+  %2 = getelementptr i8, ptr %pointer.phi13, i32 64
+  %3 = getelementptr i8, ptr %pointer.phi13, <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 32, i8 36, i8 40, i8 44, i8 48, i8 52, i8 56, i8 60>
+  %scatter.address = getelementptr i8, <16 x ptr> %3, i8 5
+  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %gather.address, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   %4 = add nsw <16 x i8> %wide.masked.gather, %broadcast.splat
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %4, <16 x i8*> %scatter.address, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %4, <16 x ptr> %scatter.address, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %5 = icmp eq i32 %index.next, 996
   br i1 %5, label %end, label %vector.body
@@ -371,7 +371,7 @@ end:
   ret void
 }
 
-define void @ptr_iv_v4f32(float* noalias nocapture readonly %A, float* noalias nocapture %B, float %y) {
+define void @ptr_iv_v4f32(ptr noalias nocapture readonly %A, ptr noalias nocapture %B, float %y) {
 ; CHECK-LABEL: ptr_iv_v4f32:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r7, lr}
@@ -402,16 +402,16 @@ vector.ph:                                        ; preds = %entry
   br label %vector.body
 
 vector.body:
-  %pointer.phi = phi float* [ %A, %vector.ph ], [ %0, %vector.body ]
-  %pointer.phi13 = phi float* [ %B, %vector.ph ], [ %2, %vector.body ]
+  %pointer.phi = phi ptr [ %A, %vector.ph ], [ %0, %vector.body ]
+  %pointer.phi13 = phi ptr [ %B, %vector.ph ], [ %2, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr float, float* %pointer.phi, i32 16
-  %1 = getelementptr float, float* %pointer.phi, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
-  %2 = getelementptr float, float* %pointer.phi13, i32 16
-  %3 = getelementptr float, float* %pointer.phi13, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
-  %wide.masked.gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  %0 = getelementptr float, ptr %pointer.phi, i32 16
+  %1 = getelementptr float, ptr %pointer.phi, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %2 = getelementptr float, ptr %pointer.phi13, i32 16
+  %3 = getelementptr float, ptr %pointer.phi13, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %wide.masked.gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
   %4 = fadd <4 x float> %wide.masked.gather, %broadcast.splat
-  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %4, <4 x float*> %3, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %4, <4 x ptr> %3, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %5 = icmp eq i32 %index.next, 996
   br i1 %5, label %end, label %vector.body
@@ -420,7 +420,7 @@ end:
   ret void
 }
 
-define void @ptr_iv_v4f32_mult(float* noalias nocapture readonly %A, float* noalias nocapture %B, float %y) {
+define void @ptr_iv_v4f32_mult(ptr noalias nocapture readonly %A, ptr noalias nocapture %B, float %y) {
 ; CHECK-LABEL: ptr_iv_v4f32_mult:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r7, lr}
@@ -457,18 +457,18 @@ vector.ph:                                        ; preds = %entry
   br label %vector.body
 
 vector.body:
-  %pointer.phi = phi float* [ %A, %vector.ph ], [ %0, %vector.body ]
-  %pointer.phi13 = phi float* [ %B, %vector.ph ], [ %2, %vector.body ]
+  %pointer.phi = phi ptr [ %A, %vector.ph ], [ %0, %vector.body ]
+  %pointer.phi13 = phi ptr [ %B, %vector.ph ], [ %2, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr float, float* %pointer.phi, i32 16
-  %1 = getelementptr float, float* %pointer.phi, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
-  %gather.address = getelementptr float, <4 x float*> %1, i32 3
-  %2 = getelementptr float, float* %pointer.phi13, i32 16
-  %3 = getelementptr float, float* %pointer.phi13, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
-  %scatter.address = getelementptr float, <4 x float*> %1, i32 5
-  %wide.masked.gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %gather.address, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  %0 = getelementptr float, ptr %pointer.phi, i32 16
+  %1 = getelementptr float, ptr %pointer.phi, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %gather.address = getelementptr float, <4 x ptr> %1, i32 3
+  %2 = getelementptr float, ptr %pointer.phi13, i32 16
+  %3 = getelementptr float, ptr %pointer.phi13, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %scatter.address = getelementptr float, <4 x ptr> %1, i32 5
+  %wide.masked.gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %gather.address, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
   %4 = fadd <4 x float> %wide.masked.gather, %broadcast.splat
-  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %4, <4 x float*> %scatter.address, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %4, <4 x ptr> %scatter.address, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %5 = icmp eq i32 %index.next, 996
   br i1 %5, label %end, label %vector.body
@@ -477,7 +477,7 @@ end:
   ret void
 }
 
-define void @ptr_iv_v8f16(half* noalias nocapture readonly %A, half* noalias nocapture %B, float %y) {
+define void @ptr_iv_v8f16(ptr noalias nocapture readonly %A, ptr noalias nocapture %B, float %y) {
 ; CHECK-LABEL: ptr_iv_v8f16:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r7, lr}
@@ -516,16 +516,16 @@ vector.ph:
   br label %vector.body
 
 vector.body:
-  %pointer.phi = phi half* [ %A, %vector.ph ], [ %0, %vector.body ]
-  %pointer.phi13 = phi half* [ %B, %vector.ph ], [ %2, %vector.body ]
+  %pointer.phi = phi ptr [ %A, %vector.ph ], [ %0, %vector.body ]
+  %pointer.phi13 = phi ptr [ %B, %vector.ph ], [ %2, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr half, half* %pointer.phi, i32 32
-  %1 = getelementptr half, half* %pointer.phi, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
-  %2 = getelementptr half, half* %pointer.phi13, i32 32
-  %3 = getelementptr half, half* %pointer.phi13, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
-  %wide.masked.gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
+  %0 = getelementptr half, ptr %pointer.phi, i32 32
+  %1 = getelementptr half, ptr %pointer.phi, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
+  %2 = getelementptr half, ptr %pointer.phi13, i32 32
+  %3 = getelementptr half, ptr %pointer.phi13, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
+  %wide.masked.gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %1, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
   %4 = fadd <8 x half> %wide.masked.gather, %broadcast.splat
-  call void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half> %4, <8 x half*> %3, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8f16.v8p0(<8 x half> %4, <8 x ptr> %3, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %5 = icmp eq i32 %index.next, 996
   br i1 %5, label %end, label %vector.body
@@ -534,7 +534,7 @@ end:
   ret void
 }
 
-define void @ptr_iv_v8f16_mult(half* noalias nocapture readonly %A, half* noalias nocapture %B, float %y) {
+define void @ptr_iv_v8f16_mult(ptr noalias nocapture readonly %A, ptr noalias nocapture %B, float %y) {
 ; CHECK-LABEL: ptr_iv_v8f16_mult:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r7, lr}
@@ -583,18 +583,18 @@ vector.ph:
   br label %vector.body
 
 vector.body:
-  %pointer.phi = phi half* [ %A, %vector.ph ], [ %0, %vector.body ]
-  %pointer.phi13 = phi half* [ %B, %vector.ph ], [ %2, %vector.body ]
+  %pointer.phi = phi ptr [ %A, %vector.ph ], [ %0, %vector.body ]
+  %pointer.phi13 = phi ptr [ %B, %vector.ph ], [ %2, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr half, half* %pointer.phi, i32 32
-  %1 = getelementptr half, half* %pointer.phi, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
-  %gather.address = getelementptr half, <8 x half*> %1, i32 3
-  %2 = getelementptr half, half* %pointer.phi13, i32 32
-  %3 = getelementptr half, half* %pointer.phi13, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
-  %scatter.address = getelementptr half, <8 x half*> %1, i32 5
-  %wide.masked.gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %gather.address, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
+  %0 = getelementptr half, ptr %pointer.phi, i32 32
+  %1 = getelementptr half, ptr %pointer.phi, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
+  %gather.address = getelementptr half, <8 x ptr> %1, i32 3
+  %2 = getelementptr half, ptr %pointer.phi13, i32 32
+  %3 = getelementptr half, ptr %pointer.phi13, <8 x i16> <i16 0, i16 4, i16 8, i16 12, i16 16, i16 20, i16 24, i16 28>
+  %scatter.address = getelementptr half, <8 x ptr> %1, i32 5
+  %wide.masked.gather = call <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr> %gather.address, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x half> undef)
   %4 = fadd <8 x half> %wide.masked.gather, %broadcast.splat
-  call void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half> %4, <8 x half*> %scatter.address, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8f16.v8p0(<8 x half> %4, <8 x ptr> %scatter.address, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %5 = icmp eq i32 %index.next, 996
   br i1 %5, label %end, label %vector.body
@@ -604,7 +604,7 @@ end:
 }
 
 
-define arm_aapcs_vfpcc void @three_pointer_iv_v4i32(i32* nocapture readonly %x, i32* nocapture %z, i32 %n) {
+define arm_aapcs_vfpcc void @three_pointer_iv_v4i32(ptr nocapture readonly %x, ptr nocapture %z, i32 %n) {
 ; CHECK-LABEL: three_pointer_iv_v4i32:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r4, lr}
@@ -657,27 +657,27 @@ vector.ph:
   br label %vector.body
 
 vector.body:
-  %pointer.phi = phi i32* [ %x, %vector.ph ], [ %v3, %vector.body ]
-  %pointer.phi55 = phi i32* [ %z, %vector.ph ], [ %v4, %vector.body ]
+  %pointer.phi = phi ptr [ %x, %vector.ph ], [ %v3, %vector.body ]
+  %pointer.phi55 = phi ptr [ %z, %vector.ph ], [ %v4, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %vector.gep = getelementptr i32, i32* %pointer.phi, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
-  %v3 = getelementptr i32, i32* %pointer.phi, i32 12
-  %vector.gep56 = getelementptr i32, i32* %pointer.phi55, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
-  %v4 = getelementptr i32, i32* %pointer.phi55, i32 12
+  %vector.gep = getelementptr i32, ptr %pointer.phi, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+  %v3 = getelementptr i32, ptr %pointer.phi, i32 12
+  %vector.gep56 = getelementptr i32, ptr %pointer.phi55, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+  %v4 = getelementptr i32, ptr %pointer.phi55, i32 12
   %v5 = add i32 %index, 0
-  %v6 = getelementptr inbounds i32, <4 x i32*> %vector.gep, i32 1
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %vector.gep, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
-  %v7 = getelementptr inbounds i32, <4 x i32*> %vector.gep, i32 2
-  %wide.masked.gather57 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %v6, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
-  %wide.masked.gather58 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %v7, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %v6 = getelementptr inbounds i32, <4 x ptr> %vector.gep, i32 1
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %vector.gep, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %v7 = getelementptr inbounds i32, <4 x ptr> %vector.gep, i32 2
+  %wide.masked.gather57 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %v6, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %wide.masked.gather58 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %v7, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   %v11 = mul nuw nsw <4 x i32> %wide.masked.gather, <i32 10, i32 10, i32 10, i32 10>
   %v13 = mul nuw nsw <4 x i32> %wide.masked.gather, %wide.masked.gather57
   %v15 = mul nuw nsw <4 x i32> %wide.masked.gather, %wide.masked.gather58
-  %v17 = getelementptr inbounds i32, <4 x i32*> %vector.gep56, i32 1
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %v11, <4 x i32*> %vector.gep56, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
-  %v18 = getelementptr inbounds i32, <4 x i32*> %vector.gep56, i32 2
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %v13, <4 x i32*> %v17, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %v15, <4 x i32*> %v18, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %v17 = getelementptr inbounds i32, <4 x ptr> %vector.gep56, i32 1
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %v11, <4 x ptr> %vector.gep56, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %v18 = getelementptr inbounds i32, <4 x ptr> %vector.gep56, i32 2
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %v13, <4 x ptr> %v17, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %v15, <4 x ptr> %v18, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %v37 = icmp eq i32 %index.next, %n
   br i1 %v37, label %end, label %vector.body
@@ -686,7 +686,7 @@ end:
   ret void;
 }
 
-define arm_aapcs_vfpcc void @three_pointer_iv_v4i8(i8* nocapture readonly %x, i8* nocapture %z, i32 %n) {
+define arm_aapcs_vfpcc void @three_pointer_iv_v4i8(ptr nocapture readonly %x, ptr nocapture %z, i32 %n) {
 ; CHECK-LABEL: three_pointer_iv_v4i8:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r4, lr}
@@ -739,19 +739,19 @@ vector.ph:
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
-  %pointer.phi = phi i8* [ %x, %vector.ph ], [ %v3, %vector.body ]
-  %pointer.phi55 = phi i8* [ %z, %vector.ph ], [ %v4, %vector.body ]
+  %pointer.phi = phi ptr [ %x, %vector.ph ], [ %v3, %vector.body ]
+  %pointer.phi55 = phi ptr [ %z, %vector.ph ], [ %v4, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %vector.gep = getelementptr i8, i8* %pointer.phi, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
-  %v3 = getelementptr i8, i8* %pointer.phi, i32 12
-  %vector.gep56 = getelementptr i8, i8* %pointer.phi55, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
-  %v4 = getelementptr i8, i8* %pointer.phi55, i32 12
+  %vector.gep = getelementptr i8, ptr %pointer.phi, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+  %v3 = getelementptr i8, ptr %pointer.phi, i32 12
+  %vector.gep56 = getelementptr i8, ptr %pointer.phi55, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+  %v4 = getelementptr i8, ptr %pointer.phi55, i32 12
   %v5 = add i32 %index, 0
-  %v6 = getelementptr inbounds i8, <4 x i8*> %vector.gep, i32 1
-  %wide.masked.gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %vector.gep, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
-  %v7 = getelementptr inbounds i8, <4 x i8*> %vector.gep, i32 2
-  %wide.masked.gather57 = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %v6, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
-  %wide.masked.gather58 = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %v7, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %v6 = getelementptr inbounds i8, <4 x ptr> %vector.gep, i32 1
+  %wide.masked.gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %vector.gep, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %v7 = getelementptr inbounds i8, <4 x ptr> %vector.gep, i32 2
+  %wide.masked.gather57 = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %v6, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %wide.masked.gather58 = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %v7, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
   %v8 = zext <4 x i8> %wide.masked.gather to <4 x i32>
   %v9 = zext <4 x i8> %wide.masked.gather57 to <4 x i32>
   %v10 = zext <4 x i8> %wide.masked.gather58 to <4 x i32>
@@ -761,11 +761,11 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %v14 = trunc <4 x i32> %v13 to <4 x i8>
   %v15 = mul nuw nsw <4 x i32> %v8, %v10
   %v16 = trunc <4 x i32> %v15 to <4 x i8>
-  %v17 = getelementptr inbounds i8, <4 x i8*> %vector.gep56, i32 1
-  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %v12, <4 x i8*> %vector.gep56, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
-  %v18 = getelementptr inbounds i8, <4 x i8*> %vector.gep56, i32 2
-  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %v14, <4 x i8*> %v17, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
-  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %v16, <4 x i8*> %v18, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %v17 = getelementptr inbounds i8, <4 x ptr> %vector.gep56, i32 1
+  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %v12, <4 x ptr> %vector.gep56, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %v18 = getelementptr inbounds i8, <4 x ptr> %vector.gep56, i32 2
+  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %v14, <4 x ptr> %v17, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %v16, <4 x ptr> %v18, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %v37 = icmp eq i32 %index.next, %n
   br i1 %v37, label %end, label %vector.body
@@ -774,7 +774,7 @@ end:
   ret void;
 }
 
-define arm_aapcs_vfpcc void @three_pointer_iv_v8i16(i16* nocapture readonly %x, i16* nocapture %z, i32 %n) {
+define arm_aapcs_vfpcc void @three_pointer_iv_v8i16(ptr nocapture readonly %x, ptr nocapture %z, i32 %n) {
 ; CHECK-LABEL: three_pointer_iv_v8i16:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r4, lr}
@@ -839,27 +839,27 @@ vector.ph:
   br label %vector.body
 
 vector.body:
-  %pointer.phi = phi i16* [ %x, %vector.ph ], [ %v3, %vector.body ]
-  %pointer.phi55 = phi i16* [ %z, %vector.ph ], [ %v4, %vector.body ]
+  %pointer.phi = phi ptr [ %x, %vector.ph ], [ %v3, %vector.body ]
+  %pointer.phi55 = phi ptr [ %z, %vector.ph ], [ %v4, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %vector.gep = getelementptr i16, i16* %pointer.phi, <8 x i16> <i16 0, i16 3, i16 6, i16 9, i16 12, i16 15, i16 18, i16 21>
-  %v3 = getelementptr i16, i16* %pointer.phi, i32 24
-  %vector.gep56 = getelementptr i16, i16* %pointer.phi55, <8 x i16> <i16 0, i16 3, i16 6, i16 9, i16 12, i16 15, i16 18, i16 21>
-  %v4 = getelementptr i16, i16* %pointer.phi55, i32 24
+  %vector.gep = getelementptr i16, ptr %pointer.phi, <8 x i16> <i16 0, i16 3, i16 6, i16 9, i16 12, i16 15, i16 18, i16 21>
+  %v3 = getelementptr i16, ptr %pointer.phi, i32 24
+  %vector.gep56 = getelementptr i16, ptr %pointer.phi55, <8 x i16> <i16 0, i16 3, i16 6, i16 9, i16 12, i16 15, i16 18, i16 21>
+  %v4 = getelementptr i16, ptr %pointer.phi55, i32 24
   %v5 = add i32 %index, 0
-  %v6 = getelementptr inbounds i16, <8 x i16*> %vector.gep, i16 1
-  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %vector.gep, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
-  %v7 = getelementptr inbounds i16, <8 x i16*> %vector.gep, i16 2
-  %wide.masked.gather57 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %v6, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
-  %wide.masked.gather58 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %v7, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %v6 = getelementptr inbounds i16, <8 x ptr> %vector.gep, i16 1
+  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %vector.gep, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %v7 = getelementptr inbounds i16, <8 x ptr> %vector.gep, i16 2
+  %wide.masked.gather57 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %v6, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
+  %wide.masked.gather58 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %v7, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
   %v11 = mul nuw nsw <8 x i16> %wide.masked.gather, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
   %v13 = mul nuw nsw <8 x i16> %wide.masked.gather, %wide.masked.gather57
   %v15 = mul nuw nsw <8 x i16> %wide.masked.gather, %wide.masked.gather58
-  %v17 = getelementptr inbounds i16, <8 x i16*> %vector.gep56, i32 1
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %v11, <8 x i16*> %vector.gep56, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
-  %v18 = getelementptr inbounds i16, <8 x i16*> %vector.gep56, i32 2
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %v13, <8 x i16*> %v17, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %v15, <8 x i16*> %v18, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %v17 = getelementptr inbounds i16, <8 x ptr> %vector.gep56, i32 1
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %v11, <8 x ptr> %vector.gep56, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %v18 = getelementptr inbounds i16, <8 x ptr> %vector.gep56, i32 2
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %v13, <8 x ptr> %v17, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %v15, <8 x ptr> %v18, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %v37 = icmp eq i32 %index.next, %n
   br i1 %v37, label %end, label %vector.body
@@ -868,7 +868,7 @@ end:
   ret void;
 }
 
-define arm_aapcs_vfpcc void @three_pointer_iv_v16i8(i8* nocapture readonly %x, i8* nocapture %z, i32 %n) {
+define arm_aapcs_vfpcc void @three_pointer_iv_v16i8(ptr nocapture readonly %x, ptr nocapture %z, i32 %n) {
 ; CHECK-LABEL: three_pointer_iv_v16i8:
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .save {r4, lr}
@@ -957,27 +957,27 @@ vector.ph:
   br label %vector.body
 
 vector.body:
-  %pointer.phi = phi i8* [ %x, %vector.ph ], [ %v3, %vector.body ]
-  %pointer.phi55 = phi i8* [ %z, %vector.ph ], [ %v4, %vector.body ]
+  %pointer.phi = phi ptr [ %x, %vector.ph ], [ %v3, %vector.body ]
+  %pointer.phi55 = phi ptr [ %z, %vector.ph ], [ %v4, %vector.body ]
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %vector.gep = getelementptr i8, i8* %pointer.phi, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
-  %v3 = getelementptr i8, i8* %pointer.phi, i32 48
-  %vector.gep56 = getelementptr i8, i8* %pointer.phi55, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
-  %v4 = getelementptr i8, i8* %pointer.phi55, i32 48
+  %vector.gep = getelementptr i8, ptr %pointer.phi, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
+  %v3 = getelementptr i8, ptr %pointer.phi, i32 48
+  %vector.gep56 = getelementptr i8, ptr %pointer.phi55, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
+  %v4 = getelementptr i8, ptr %pointer.phi55, i32 48
   %v5 = add i32 %index, 0
-  %v6 = getelementptr inbounds i8, <16 x i8*> %vector.gep, i8 1
-  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %vector.gep, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
-  %v7 = getelementptr inbounds i8, <16 x i8*> %vector.gep, i8 2
-  %wide.masked.gather57 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %v6, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
-  %wide.masked.gather58 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %v7, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %v6 = getelementptr inbounds i8, <16 x ptr> %vector.gep, i8 1
+  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %vector.gep, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %v7 = getelementptr inbounds i8, <16 x ptr> %vector.gep, i8 2
+  %wide.masked.gather57 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %v6, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
+  %wide.masked.gather58 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %v7, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
   %v11 = mul nuw nsw <16 x i8> %wide.masked.gather, <i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10>
   %v13 = mul nuw nsw <16 x i8> %wide.masked.gather, %wide.masked.gather57
   %v15 = mul nuw nsw <16 x i8> %wide.masked.gather, %wide.masked.gather58
-  %v17 = getelementptr inbounds i8, <16 x i8*> %vector.gep56, i32 1
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %v11, <16 x i8*> %vector.gep56, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
-  %v18 = getelementptr inbounds i8, <16 x i8*> %vector.gep56, i32 2
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %v13, <16 x i8*> %v17, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %v15, <16 x i8*> %v18, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %v17 = getelementptr inbounds i8, <16 x ptr> %vector.gep56, i32 1
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %v11, <16 x ptr> %vector.gep56, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %v18 = getelementptr inbounds i8, <16 x ptr> %vector.gep56, i32 2
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %v13, <16 x ptr> %v17, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %v15, <16 x ptr> %v18, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %v37 = icmp eq i32 %index.next, %n
   br i1 %v37, label %end, label %vector.body
@@ -986,16 +986,16 @@ end:
   ret void;
 }
 
-declare <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>)
-declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
-declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*>, i32, <4 x i1>, <4 x float>)
-declare <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*>, i32, <8 x i1>, <8 x i16>)
-declare <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*>, i32, <8 x i1>, <8 x half>)
-declare <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>)
-
-declare void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8>, <4 x i8*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float>, <4 x float*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16>, <8 x i16*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half>, <8 x half*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8>, <16 x i8*>, i32, <16 x i1>)
+declare <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i8>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)
+declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>)
+declare <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i16>)
+declare <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x half>)
+declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i8>)
+
+declare void @llvm.masked.scatter.v4i8.v4p0(<4 x i8>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4f32.v4p0(<4 x float>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v8i16.v8p0(<8 x i16>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v8f16.v8p0(<8 x half>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v16i8.v16p0(<16 x i8>, <16 x ptr>, i32, <16 x i1>)

diff  --git a/llvm/test/CodeGen/Thumb2/mve-gather-tailpred.ll b/llvm/test/CodeGen/Thumb2/mve-gather-tailpred.ll
index 44f663ed44c6c..9093b9af00656 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-tailpred.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-tailpred.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst -tail-predication=force-enabled %s -o - -opaque-pointers | FileCheck %s
 
-define arm_aapcs_vfpcc void @gather_inc_v4i32_simple(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n) {
+define arm_aapcs_vfpcc void @gather_inc_v4i32_simple(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) {
 ; CHECK-LABEL: gather_inc_v4i32_simple:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -53,15 +53,14 @@ vector.ph:                                        ; preds = %for.body.preheader
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %data, <4 x i32> %vec.ind
-  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %0, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
-  %1 = getelementptr inbounds i32, i32* %dst, i32 %index
-  %2 = bitcast i32* %1 to <4 x i32>*
-  store <4 x i32> %wide.masked.gather, <4 x i32>* %2, align 4
+  %0 = getelementptr inbounds i32, ptr %data, <4 x i32> %vec.ind
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %0, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %1 = getelementptr inbounds i32, ptr %dst, i32 %index
+  store <4 x i32> %wide.masked.gather, ptr %1, align 4
   %index.next = add i32 %index, 4
   %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
-  %3 = icmp eq i32 %index.next, %n.vec
-  br i1 %3, label %middle.block, label %vector.body
+  %2 = icmp eq i32 %index.next, %n.vec
+  br i1 %2, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %n
@@ -71,4 +70,4 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
   ret void
 }
 
-declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)

diff  --git a/llvm/test/CodeGen/Thumb2/mve-gather-unused.ll b/llvm/test/CodeGen/Thumb2/mve-gather-unused.ll
index b8d732b709cbf..fca30e53a55f9 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-unused.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-unused.ll
@@ -4,35 +4,35 @@
 ; This file has some unused gathers, making sure that they do not cause
 ; problems as the function gets simplified.
 
-define arm_aapcs_vfpcc void @unused1(<4 x i32*> %offs) {
+define arm_aapcs_vfpcc void @unused1(<4 x ptr> %offs) {
 ; CHECK-LABEL: unused1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    bx lr
 entry:
-  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret void
 }
 
-define arm_aapcs_vfpcc void @unused2(<4 x i32*> %offs) {
+define arm_aapcs_vfpcc void @unused2(<4 x ptr> %offs) {
 ; CHECK-LABEL: unused2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    bx lr
 entry:
-  %gather1 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
-  %gather2 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %gather1 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %gather2 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   ret void
 }
 
-define arm_aapcs_vfpcc void @unused2_used(<4 x i32*> %offs) {
+define arm_aapcs_vfpcc void @unused2_used(<4 x ptr> %offs) {
 ; CHECK-LABEL: unused2_used:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    bx lr
 entry:
-  %gather1 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
-  %gather2 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %gather1 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %gather2 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
   %unused = add <4 x i32> %gather1, %gather2
   ret void
 }
 
 
-declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)

diff  --git a/llvm/test/CodeGen/Thumb2/mve-gatherscatter-mmo.ll b/llvm/test/CodeGen/Thumb2/mve-gatherscatter-mmo.ll
index 8c37d85b8b19a..4cf6a84568f66 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gatherscatter-mmo.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gatherscatter-mmo.ll
@@ -1,20 +1,20 @@
 ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -stop-after=finalize-isel -o - %s | FileCheck %s
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_gather_offset_s16(i8* %base, <8 x i16> %offset) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_gather_offset_s16(ptr %base, <8 x i16> %offset) {
 ; CHECK-LABEL: name: test_vldrbq_gather_offset_s16
 ; CHECK: early-clobber %2:mqpr = MVE_VLDRBS16_rq %0, %1, 0, $noreg, $noreg :: (load (s64), align 1)
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0i8.v8i16(i8* %base, <8 x i16> %offset, i32 8, i32 0, i32 0)
+  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0.v8i16(ptr %base, <8 x i16> %offset, i32 8, i32 0, i32 0)
   ret <8 x i16> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_gather_offset_z_s32(i8* %base, <4 x i32> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_gather_offset_z_s32(ptr %base, <4 x i32> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: name: test_vldrbq_gather_offset_z_s32
 ; CHECK: early-clobber %4:mqpr = MVE_VLDRBS32_rq %0, %1, 1, killed %3, $noreg :: (load (s32), align 1)
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i8.v4i32.v4i1(i8* %base, <4 x i32> %offset, i32 8, i32 0, i32 0, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0.v4i32.v4i1(ptr %base, <4 x i32> %offset, i32 8, i32 0, i32 0, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -36,48 +36,48 @@ entry:
   ret <4 x float> %2
 }
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_s64(<2 x i64>* %addr) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_s64(ptr %addr) {
 ; CHECK-LABEL: name: test_vldrdq_gather_base_wb_s64
 ; CHECK: %2:mqpr, early-clobber %3:mqpr = MVE_VLDRDU64_qi_pre %1, 576, 0, $noreg, $noreg :: (load (s128), align 1)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
+  %0 = load <2 x i64>, ptr %addr, align 8
   %1 = call { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.v2i64.v2i64(<2 x i64> %0, i32 576)
   %2 = extractvalue { <2 x i64>, <2 x i64> } %1, 1
-  store <2 x i64> %2, <2 x i64>* %addr, align 8
+  store <2 x i64> %2, ptr %addr, align 8
   %3 = extractvalue { <2 x i64>, <2 x i64> } %1, 0
   ret <2 x i64> %3
 }
 
-define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_z_f32(<4 x i32>* %addr, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_z_f32(ptr %addr, i16 zeroext %p) {
 ; CHECK-LABEL: name: test_vldrwq_gather_base_wb_z_f32
 ; CHECK: %4:mqpr, early-clobber %5:mqpr = MVE_VLDRWU32_qi_pre %3, -352, 1, killed %2, $noreg :: (load (s128), align 1)
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = zext i16 %p to i32
   %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
   %3 = call { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v4f32.v4i32.v4i1(<4 x i32> %0, i32 -352, <4 x i1> %2)
   %4 = extractvalue { <4 x float>, <4 x i32> } %3, 1
-  store <4 x i32> %4, <4 x i32>* %addr, align 8
+  store <4 x i32> %4, ptr %addr, align 8
   %5 = extractvalue { <4 x float>, <4 x i32> } %3, 0
   ret <4 x float> %5
 }
 
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_s32(i8* %base, <4 x i32> %offset, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_s32(ptr %base, <4 x i32> %offset, <4 x i32> %value) {
 ; CHECK-LABEL: name: test_vstrbq_scatter_offset_s32
 ; CHECK: MVE_VSTRB32_rq %2, %0, %1, 0, $noreg, $noreg :: (store (s32), align 1)
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i8.v4i32.v4i32(i8* %base, <4 x i32> %offset, <4 x i32> %value, i32 8, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4i32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 8, i32 0)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_s8(i8* %base, <16 x i8> %offset, <16 x i8> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_s8(ptr %base, <16 x i8> %offset, <16 x i8> %value, i16 zeroext %p) {
 ; CHECK-LABEL: name: test_vstrbq_scatter_offset_p_s8
 ; CHECK: MVE_VSTRB8_rq %2, %0, %1, 1, killed %4, $noreg :: (store (s128), align 1)
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i8.v16i8.v16i8.v16i1(i8* %base, <16 x i8> %offset, <16 x i8> %value, i32 8, i32 0, <16 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v16i8.v16i8.v16i1(ptr %base, <16 x i8> %offset, <16 x i8> %value, i32 8, i32 0, <16 x i1> %1)
   ret void
 }
 
@@ -99,39 +99,39 @@ entry:
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_s64(<2 x i64>* %addr, <2 x i64> %value) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_s64(ptr %addr, <2 x i64> %value) {
 ; CHECK-LABEL: name: test_vstrdq_scatter_base_wb_s64
 ; CHECK: %3:mqpr = MVE_VSTRD64_qi_pre %1, %2, 208, 0, $noreg, $noreg :: (store (s128), align 1)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
+  %0 = load <2 x i64>, ptr %addr, align 8
   %1 = call <2 x i64> @llvm.arm.mve.vstr.scatter.base.wb.v2i64.v2i64(<2 x i64> %0, i32 208, <2 x i64> %value)
-  store <2 x i64> %1, <2 x i64>* %addr, align 8
+  store <2 x i64> %1, ptr %addr, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_p_s64(<2 x i64>* %addr, <2 x i64> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_p_s64(ptr %addr, <2 x i64> %value, i16 zeroext %p) {
 ; CHECK-LABEL: name: test_vstrdq_scatter_base_wb_p_s64
 ; CHECK: %5:mqpr = MVE_VSTRD64_qi_pre %1, %3, 248, 1, killed %4, $noreg :: (store (s128), align 1)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
+  %0 = load <2 x i64>, ptr %addr, align 8
   %1 = zext i16 %p to i32
   %2 = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %1)
   %3 = call <2 x i64> @llvm.arm.mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v2i1(<2 x i64> %0, i32 248, <2 x i64> %value, <2 x i1> %2)
-  store <2 x i64> %3, <2 x i64>* %addr, align 8
+  store <2 x i64> %3, ptr %addr, align 8
   ret void
 }
 
 declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32)
 declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
 declare <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32)
-declare <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0i8.v8i16(i8*, <8 x i16>, i32, i32, i32)
-declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i8.v4i32.v4i1(i8*, <4 x i32>, i32, i32, i32, <4 x i1>)
+declare <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0.v8i16(ptr, <8 x i16>, i32, i32, i32)
+declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0.v4i32.v4i1(ptr, <4 x i32>, i32, i32, i32, <4 x i1>)
 declare <2 x i64> @llvm.arm.mve.vldr.gather.base.v2i64.v2i64(<2 x i64>, i32)
 declare <4 x float> @llvm.arm.mve.vldr.gather.base.predicated.v4f32.v4i32.v4i1(<4 x i32>, i32, <4 x i1>)
 declare { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.v2i64.v2i64(<2 x i64>, i32)
 declare { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v4f32.v4i32.v4i1(<4 x i32>, i32, <4 x i1>)
-declare void @llvm.arm.mve.vstr.scatter.offset.p0i8.v4i32.v4i32(i8*, <4 x i32>, <4 x i32>, i32, i32)
-declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i8.v16i8.v16i8.v16i1(i8*, <16 x i8>, <16 x i8>, i32, i32, <16 x i1>)
+declare void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4i32(ptr, <4 x i32>, <4 x i32>, i32, i32)
+declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v16i8.v16i8.v16i1(ptr, <16 x i8>, <16 x i8>, i32, i32, <16 x i1>)
 declare void @llvm.arm.mve.vstr.scatter.base.v2i64.v2i64(<2 x i64>, i32, <2 x i64>)
 declare void @llvm.arm.mve.vstr.scatter.base.predicated.v2i64.v2i64.v2i1(<2 x i64>, i32, <2 x i64>, <2 x i1>)
 declare <2 x i64> @llvm.arm.mve.vstr.scatter.base.wb.v2i64.v2i64(<2 x i64>, i32, <2 x i64>)

diff  --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/idup.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/idup.ll
index 6cb19e837f2ca..c523327306e63 100644
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/idup.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/idup.ll
@@ -133,7 +133,7 @@ entry:
   ret <4 x i32> %1
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vidupq_wb_u8(i32* nocapture %a) {
+define arm_aapcs_vfpcc <16 x i8> @test_vidupq_wb_u8(ptr nocapture %a) {
 ; CHECK-LABEL: test_vidupq_wb_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -141,15 +141,15 @@ define arm_aapcs_vfpcc <16 x i8> @test_vidupq_wb_u8(i32* nocapture %a) {
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = tail call { <16 x i8>, i32 } @llvm.arm.mve.vidup.v16i8(i32 %0, i32 8)
   %2 = extractvalue { <16 x i8>, i32 } %1, 1
-  store i32 %2, i32* %a, align 4
+  store i32 %2, ptr %a, align 4
   %3 = extractvalue { <16 x i8>, i32 } %1, 0
   ret <16 x i8> %3
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vidupq_wb_u16(i32* nocapture %a) {
+define arm_aapcs_vfpcc <8 x i16> @test_vidupq_wb_u16(ptr nocapture %a) {
 ; CHECK-LABEL: test_vidupq_wb_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -157,15 +157,15 @@ define arm_aapcs_vfpcc <8 x i16> @test_vidupq_wb_u16(i32* nocapture %a) {
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = tail call { <8 x i16>, i32 } @llvm.arm.mve.vidup.v8i16(i32 %0, i32 1)
   %2 = extractvalue { <8 x i16>, i32 } %1, 1
-  store i32 %2, i32* %a, align 4
+  store i32 %2, ptr %a, align 4
   %3 = extractvalue { <8 x i16>, i32 } %1, 0
   ret <8 x i16> %3
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vidupq_wb_u32(i32* nocapture %a) {
+define arm_aapcs_vfpcc <4 x i32> @test_vidupq_wb_u32(ptr nocapture %a) {
 ; CHECK-LABEL: test_vidupq_wb_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -173,15 +173,15 @@ define arm_aapcs_vfpcc <4 x i32> @test_vidupq_wb_u32(i32* nocapture %a) {
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.v4i32(i32 %0, i32 4)
   %2 = extractvalue { <4 x i32>, i32 } %1, 1
-  store i32 %2, i32* %a, align 4
+  store i32 %2, ptr %a, align 4
   %3 = extractvalue { <4 x i32>, i32 } %1, 0
   ret <4 x i32> %3
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vddupq_wb_u8(i32* nocapture %a) {
+define arm_aapcs_vfpcc <16 x i8> @test_vddupq_wb_u8(ptr nocapture %a) {
 ; CHECK-LABEL: test_vddupq_wb_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -189,15 +189,15 @@ define arm_aapcs_vfpcc <16 x i8> @test_vddupq_wb_u8(i32* nocapture %a) {
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = tail call { <16 x i8>, i32 } @llvm.arm.mve.vddup.v16i8(i32 %0, i32 2)
   %2 = extractvalue { <16 x i8>, i32 } %1, 1
-  store i32 %2, i32* %a, align 4
+  store i32 %2, ptr %a, align 4
   %3 = extractvalue { <16 x i8>, i32 } %1, 0
   ret <16 x i8> %3
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vddupq_wb_u16(i32* nocapture %a) {
+define arm_aapcs_vfpcc <8 x i16> @test_vddupq_wb_u16(ptr nocapture %a) {
 ; CHECK-LABEL: test_vddupq_wb_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -205,15 +205,15 @@ define arm_aapcs_vfpcc <8 x i16> @test_vddupq_wb_u16(i32* nocapture %a) {
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = tail call { <8 x i16>, i32 } @llvm.arm.mve.vddup.v8i16(i32 %0, i32 8)
   %2 = extractvalue { <8 x i16>, i32 } %1, 1
-  store i32 %2, i32* %a, align 4
+  store i32 %2, ptr %a, align 4
   %3 = extractvalue { <8 x i16>, i32 } %1, 0
   ret <8 x i16> %3
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vddupq_wb_u32(i32* nocapture %a) {
+define arm_aapcs_vfpcc <4 x i32> @test_vddupq_wb_u32(ptr nocapture %a) {
 ; CHECK-LABEL: test_vddupq_wb_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -221,15 +221,15 @@ define arm_aapcs_vfpcc <4 x i32> @test_vddupq_wb_u32(i32* nocapture %a) {
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vddup.v4i32(i32 %0, i32 2)
   %2 = extractvalue { <4 x i32>, i32 } %1, 1
-  store i32 %2, i32* %a, align 4
+  store i32 %2, ptr %a, align 4
   %3 = extractvalue { <4 x i32>, i32 } %1, 0
   ret <4 x i32> %3
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vdwdupq_wb_u8(i32* nocapture %a, i32 %b) {
+define arm_aapcs_vfpcc <16 x i8> @test_vdwdupq_wb_u8(ptr nocapture %a, i32 %b) {
 ; CHECK-LABEL: test_vdwdupq_wb_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -237,15 +237,15 @@ define arm_aapcs_vfpcc <16 x i8> @test_vdwdupq_wb_u8(i32* nocapture %a, i32 %b)
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = tail call { <16 x i8>, i32 } @llvm.arm.mve.vdwdup.v16i8(i32 %0, i32 %b, i32 4)
   %2 = extractvalue { <16 x i8>, i32 } %1, 1
-  store i32 %2, i32* %a, align 4
+  store i32 %2, ptr %a, align 4
   %3 = extractvalue { <16 x i8>, i32 } %1, 0
   ret <16 x i8> %3
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vdwdupq_wb_u16(i32* nocapture %a, i32 %b) {
+define arm_aapcs_vfpcc <8 x i16> @test_vdwdupq_wb_u16(ptr nocapture %a, i32 %b) {
 ; CHECK-LABEL: test_vdwdupq_wb_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -253,15 +253,15 @@ define arm_aapcs_vfpcc <8 x i16> @test_vdwdupq_wb_u16(i32* nocapture %a, i32 %b)
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = tail call { <8 x i16>, i32 } @llvm.arm.mve.vdwdup.v8i16(i32 %0, i32 %b, i32 4)
   %2 = extractvalue { <8 x i16>, i32 } %1, 1
-  store i32 %2, i32* %a, align 4
+  store i32 %2, ptr %a, align 4
   %3 = extractvalue { <8 x i16>, i32 } %1, 0
   ret <8 x i16> %3
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_viwdupq_wb_u8(i32* nocapture %a, i32 %b) {
+define arm_aapcs_vfpcc <16 x i8> @test_viwdupq_wb_u8(ptr nocapture %a, i32 %b) {
 ; CHECK-LABEL: test_viwdupq_wb_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -269,15 +269,15 @@ define arm_aapcs_vfpcc <16 x i8> @test_viwdupq_wb_u8(i32* nocapture %a, i32 %b)
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = tail call { <16 x i8>, i32 } @llvm.arm.mve.viwdup.v16i8(i32 %0, i32 %b, i32 1)
   %2 = extractvalue { <16 x i8>, i32 } %1, 1
-  store i32 %2, i32* %a, align 4
+  store i32 %2, ptr %a, align 4
   %3 = extractvalue { <16 x i8>, i32 } %1, 0
   ret <16 x i8> %3
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_viwdupq_wb_u16(i32* nocapture %a, i32 %b) {
+define arm_aapcs_vfpcc <8 x i16> @test_viwdupq_wb_u16(ptr nocapture %a, i32 %b) {
 ; CHECK-LABEL: test_viwdupq_wb_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -285,15 +285,15 @@ define arm_aapcs_vfpcc <8 x i16> @test_viwdupq_wb_u16(i32* nocapture %a, i32 %b)
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = tail call { <8 x i16>, i32 } @llvm.arm.mve.viwdup.v8i16(i32 %0, i32 %b, i32 1)
   %2 = extractvalue { <8 x i16>, i32 } %1, 1
-  store i32 %2, i32* %a, align 4
+  store i32 %2, ptr %a, align 4
   %3 = extractvalue { <8 x i16>, i32 } %1, 0
   ret <8 x i16> %3
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_viwdupq_wb_u32(i32* nocapture %a, i32 %b) {
+define arm_aapcs_vfpcc <4 x i32> @test_viwdupq_wb_u32(ptr nocapture %a, i32 %b) {
 ; CHECK-LABEL: test_viwdupq_wb_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -301,15 +301,15 @@ define arm_aapcs_vfpcc <4 x i32> @test_viwdupq_wb_u32(i32* nocapture %a, i32 %b)
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = tail call { <4 x i32>, i32 } @llvm.arm.mve.viwdup.v4i32(i32 %0, i32 %b, i32 8)
   %2 = extractvalue { <4 x i32>, i32 } %1, 1
-  store i32 %2, i32* %a, align 4
+  store i32 %2, ptr %a, align 4
   %3 = extractvalue { <4 x i32>, i32 } %1, 0
   ret <4 x i32> %3
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vdwdupq_wb_u32(i32* nocapture %a, i32 %b) {
+define arm_aapcs_vfpcc <4 x i32> @test_vdwdupq_wb_u32(ptr nocapture %a, i32 %b) {
 ; CHECK-LABEL: test_vdwdupq_wb_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -317,10 +317,10 @@ define arm_aapcs_vfpcc <4 x i32> @test_vdwdupq_wb_u32(i32* nocapture %a, i32 %b)
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vdwdup.v4i32(i32 %0, i32 %b, i32 2)
   %2 = extractvalue { <4 x i32>, i32 } %1, 1
-  store i32 %2, i32* %a, align 4
+  store i32 %2, ptr %a, align 4
   %3 = extractvalue { <4 x i32>, i32 } %1, 0
   ret <4 x i32> %3
 }
@@ -505,7 +505,7 @@ entry:
   ret <4 x i32> %3
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vidupq_m_wb_u8(<16 x i8> %inactive, i32* nocapture %a, i16 zeroext %p) {
+define arm_aapcs_vfpcc <16 x i8> @test_vidupq_m_wb_u8(<16 x i8> %inactive, ptr nocapture %a, i16 zeroext %p) {
 ; CHECK-LABEL: test_vidupq_m_wb_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -515,17 +515,17 @@ define arm_aapcs_vfpcc <16 x i8> @test_vidupq_m_wb_u8(<16 x i8> %inactive, i32*
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
   %3 = tail call { <16 x i8>, i32 } @llvm.arm.mve.vidup.predicated.v16i8.v16i1(<16 x i8> %inactive, i32 %0, i32 8, <16 x i1> %2)
   %4 = extractvalue { <16 x i8>, i32 } %3, 1
-  store i32 %4, i32* %a, align 4
+  store i32 %4, ptr %a, align 4
   %5 = extractvalue { <16 x i8>, i32 } %3, 0
   ret <16 x i8> %5
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vidupq_m_wb_u16(<8 x i16> %inactive, i32* nocapture %a, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vidupq_m_wb_u16(<8 x i16> %inactive, ptr nocapture %a, i16 zeroext %p) {
 ; CHECK-LABEL: test_vidupq_m_wb_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -535,17 +535,17 @@ define arm_aapcs_vfpcc <8 x i16> @test_vidupq_m_wb_u16(<8 x i16> %inactive, i32*
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
   %3 = tail call { <8 x i16>, i32 } @llvm.arm.mve.vidup.predicated.v8i16.v8i1(<8 x i16> %inactive, i32 %0, i32 2, <8 x i1> %2)
   %4 = extractvalue { <8 x i16>, i32 } %3, 1
-  store i32 %4, i32* %a, align 4
+  store i32 %4, ptr %a, align 4
   %5 = extractvalue { <8 x i16>, i32 } %3, 0
   ret <8 x i16> %5
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vidupq_m_wb_u32(<4 x i32> %inactive, i32* nocapture %a, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vidupq_m_wb_u32(<4 x i32> %inactive, ptr nocapture %a, i16 zeroext %p) {
 ; CHECK-LABEL: test_vidupq_m_wb_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -555,17 +555,17 @@ define arm_aapcs_vfpcc <4 x i32> @test_vidupq_m_wb_u32(<4 x i32> %inactive, i32*
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
   %3 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vidup.predicated.v4i32.v4i1(<4 x i32> %inactive, i32 %0, i32 8, <4 x i1> %2)
   %4 = extractvalue { <4 x i32>, i32 } %3, 1
-  store i32 %4, i32* %a, align 4
+  store i32 %4, ptr %a, align 4
   %5 = extractvalue { <4 x i32>, i32 } %3, 0
   ret <4 x i32> %5
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vddupq_m_wb_u8(<16 x i8> %inactive, i32* nocapture %a, i16 zeroext %p) {
+define arm_aapcs_vfpcc <16 x i8> @test_vddupq_m_wb_u8(<16 x i8> %inactive, ptr nocapture %a, i16 zeroext %p) {
 ; CHECK-LABEL: test_vddupq_m_wb_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -575,17 +575,17 @@ define arm_aapcs_vfpcc <16 x i8> @test_vddupq_m_wb_u8(<16 x i8> %inactive, i32*
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
   %3 = tail call { <16 x i8>, i32 } @llvm.arm.mve.vddup.predicated.v16i8.v16i1(<16 x i8> %inactive, i32 %0, i32 1, <16 x i1> %2)
   %4 = extractvalue { <16 x i8>, i32 } %3, 1
-  store i32 %4, i32* %a, align 4
+  store i32 %4, ptr %a, align 4
   %5 = extractvalue { <16 x i8>, i32 } %3, 0
   ret <16 x i8> %5
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vddupq_m_wb_u16(<8 x i16> %inactive, i32* nocapture %a, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vddupq_m_wb_u16(<8 x i16> %inactive, ptr nocapture %a, i16 zeroext %p) {
 ; CHECK-LABEL: test_vddupq_m_wb_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -595,17 +595,17 @@ define arm_aapcs_vfpcc <8 x i16> @test_vddupq_m_wb_u16(<8 x i16> %inactive, i32*
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
   %3 = tail call { <8 x i16>, i32 } @llvm.arm.mve.vddup.predicated.v8i16.v8i1(<8 x i16> %inactive, i32 %0, i32 1, <8 x i1> %2)
   %4 = extractvalue { <8 x i16>, i32 } %3, 1
-  store i32 %4, i32* %a, align 4
+  store i32 %4, ptr %a, align 4
   %5 = extractvalue { <8 x i16>, i32 } %3, 0
   ret <8 x i16> %5
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vddupq_m_wb_u32(<4 x i32> %inactive, i32* nocapture %a, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vddupq_m_wb_u32(<4 x i32> %inactive, ptr nocapture %a, i16 zeroext %p) {
 ; CHECK-LABEL: test_vddupq_m_wb_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -615,17 +615,17 @@ define arm_aapcs_vfpcc <4 x i32> @test_vddupq_m_wb_u32(<4 x i32> %inactive, i32*
 ; CHECK-NEXT:    str r2, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
   %3 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vddup.predicated.v4i32.v4i1(<4 x i32> %inactive, i32 %0, i32 4, <4 x i1> %2)
   %4 = extractvalue { <4 x i32>, i32 } %3, 1
-  store i32 %4, i32* %a, align 4
+  store i32 %4, ptr %a, align 4
   %5 = extractvalue { <4 x i32>, i32 } %3, 0
   ret <4 x i32> %5
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_viwdupq_m_wb_u8(<16 x i8> %inactive, i32* nocapture %a, i32 %b, i16 zeroext %p) {
+define arm_aapcs_vfpcc <16 x i8> @test_viwdupq_m_wb_u8(<16 x i8> %inactive, ptr nocapture %a, i32 %b, i16 zeroext %p) {
 ; CHECK-LABEL: test_viwdupq_m_wb_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr.w r12, [r0]
@@ -635,17 +635,17 @@ define arm_aapcs_vfpcc <16 x i8> @test_viwdupq_m_wb_u8(<16 x i8> %inactive, i32*
 ; CHECK-NEXT:    str.w r12, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
   %3 = tail call { <16 x i8>, i32 } @llvm.arm.mve.viwdup.predicated.v16i8.v16i1(<16 x i8> %inactive, i32 %0, i32 %b, i32 8, <16 x i1> %2)
   %4 = extractvalue { <16 x i8>, i32 } %3, 1
-  store i32 %4, i32* %a, align 4
+  store i32 %4, ptr %a, align 4
   %5 = extractvalue { <16 x i8>, i32 } %3, 0
   ret <16 x i8> %5
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_viwdupq_m_wb_u16(<8 x i16> %inactive, i32* nocapture %a, i32 %b, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_viwdupq_m_wb_u16(<8 x i16> %inactive, ptr nocapture %a, i32 %b, i16 zeroext %p) {
 ; CHECK-LABEL: test_viwdupq_m_wb_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr.w r12, [r0]
@@ -655,17 +655,17 @@ define arm_aapcs_vfpcc <8 x i16> @test_viwdupq_m_wb_u16(<8 x i16> %inactive, i32
 ; CHECK-NEXT:    str.w r12, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
   %3 = tail call { <8 x i16>, i32 } @llvm.arm.mve.viwdup.predicated.v8i16.v8i1(<8 x i16> %inactive, i32 %0, i32 %b, i32 8, <8 x i1> %2)
   %4 = extractvalue { <8 x i16>, i32 } %3, 1
-  store i32 %4, i32* %a, align 4
+  store i32 %4, ptr %a, align 4
   %5 = extractvalue { <8 x i16>, i32 } %3, 0
   ret <8 x i16> %5
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_viwdupq_m_wb_u32(<4 x i32> %inactive, i32* nocapture %a, i32 %b, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_viwdupq_m_wb_u32(<4 x i32> %inactive, ptr nocapture %a, i32 %b, i16 zeroext %p) {
 ; CHECK-LABEL: test_viwdupq_m_wb_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr.w r12, [r0]
@@ -675,17 +675,17 @@ define arm_aapcs_vfpcc <4 x i32> @test_viwdupq_m_wb_u32(<4 x i32> %inactive, i32
 ; CHECK-NEXT:    str.w r12, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
   %3 = tail call { <4 x i32>, i32 } @llvm.arm.mve.viwdup.predicated.v4i32.v4i1(<4 x i32> %inactive, i32 %0, i32 %b, i32 4, <4 x i1> %2)
   %4 = extractvalue { <4 x i32>, i32 } %3, 1
-  store i32 %4, i32* %a, align 4
+  store i32 %4, ptr %a, align 4
   %5 = extractvalue { <4 x i32>, i32 } %3, 0
   ret <4 x i32> %5
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vdwdupq_m_wb_u8(<16 x i8> %inactive, i32* nocapture %a, i32 %b, i16 zeroext %p) {
+define arm_aapcs_vfpcc <16 x i8> @test_vdwdupq_m_wb_u8(<16 x i8> %inactive, ptr nocapture %a, i32 %b, i16 zeroext %p) {
 ; CHECK-LABEL: test_vdwdupq_m_wb_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr.w r12, [r0]
@@ -695,17 +695,17 @@ define arm_aapcs_vfpcc <16 x i8> @test_vdwdupq_m_wb_u8(<16 x i8> %inactive, i32*
 ; CHECK-NEXT:    str.w r12, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
   %3 = tail call { <16 x i8>, i32 } @llvm.arm.mve.vdwdup.predicated.v16i8.v16i1(<16 x i8> %inactive, i32 %0, i32 %b, i32 1, <16 x i1> %2)
   %4 = extractvalue { <16 x i8>, i32 } %3, 1
-  store i32 %4, i32* %a, align 4
+  store i32 %4, ptr %a, align 4
   %5 = extractvalue { <16 x i8>, i32 } %3, 0
   ret <16 x i8> %5
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vdwdupq_m_wb_u16(<8 x i16> %inactive, i32* nocapture %a, i32 %b, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vdwdupq_m_wb_u16(<8 x i16> %inactive, ptr nocapture %a, i32 %b, i16 zeroext %p) {
 ; CHECK-LABEL: test_vdwdupq_m_wb_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr.w r12, [r0]
@@ -715,17 +715,17 @@ define arm_aapcs_vfpcc <8 x i16> @test_vdwdupq_m_wb_u16(<8 x i16> %inactive, i32
 ; CHECK-NEXT:    str.w r12, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
   %3 = tail call { <8 x i16>, i32 } @llvm.arm.mve.vdwdup.predicated.v8i16.v8i1(<8 x i16> %inactive, i32 %0, i32 %b, i32 4, <8 x i1> %2)
   %4 = extractvalue { <8 x i16>, i32 } %3, 1
-  store i32 %4, i32* %a, align 4
+  store i32 %4, ptr %a, align 4
   %5 = extractvalue { <8 x i16>, i32 } %3, 0
   ret <8 x i16> %5
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vdwdupq_m_wb_u32(<4 x i32> %inactive, i32* nocapture %a, i32 %b, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vdwdupq_m_wb_u32(<4 x i32> %inactive, ptr nocapture %a, i32 %b, i16 zeroext %p) {
 ; CHECK-LABEL: test_vdwdupq_m_wb_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr.w r12, [r0]
@@ -735,12 +735,12 @@ define arm_aapcs_vfpcc <4 x i32> @test_vdwdupq_m_wb_u32(<4 x i32> %inactive, i32
 ; CHECK-NEXT:    str.w r12, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
   %3 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vdwdup.predicated.v4i32.v4i1(<4 x i32> %inactive, i32 %0, i32 %b, i32 4, <4 x i1> %2)
   %4 = extractvalue { <4 x i32>, i32 } %3, 1
-  store i32 %4, i32* %a, align 4
+  store i32 %4, ptr %a, align 4
   %5 = extractvalue { <4 x i32>, i32 } %3, 0
   ret <4 x i32> %5
 }

diff  --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/load-store.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/load-store.ll
index 6b5a1dd2bf21d..cb4748db7f527 100644
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/load-store.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/load-store.ll
@@ -1,95 +1,87 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s
 
-define arm_aapcs_vfpcc <8 x half> @test_vld1q_f16(half* %base) {
+define arm_aapcs_vfpcc <8 x half> @test_vld1q_f16(ptr %base) {
 ; CHECK-LABEL: test_vld1q_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast half* %base to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  ret <8 x half> %1
+  %0 = load <8 x half>, ptr %base, align 2
+  ret <8 x half> %0
 }
 
-define arm_aapcs_vfpcc <4 x float> @test_vld1q_f32(float* %base) {
+define arm_aapcs_vfpcc <4 x float> @test_vld1q_f32(ptr %base) {
 ; CHECK-LABEL: test_vld1q_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast float* %base to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  ret <4 x float> %1
+  %0 = load <4 x float>, ptr %base, align 4
+  ret <4 x float> %0
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vld1q_s8(i8* %base) {
+define arm_aapcs_vfpcc <16 x i8> @test_vld1q_s8(ptr %base) {
 ; CHECK-LABEL: test_vld1q_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  ret <16 x i8> %1
+  %0 = load <16 x i8>, ptr %base, align 1
+  ret <16 x i8> %0
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vld1q_s16(i16* %base) {
+define arm_aapcs_vfpcc <8 x i16> @test_vld1q_s16(ptr %base) {
 ; CHECK-LABEL: test_vld1q_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  ret <8 x i16> %1
+  %0 = load <8 x i16>, ptr %base, align 2
+  ret <8 x i16> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vld1q_s32(i32* %base) {
+define arm_aapcs_vfpcc <4 x i32> @test_vld1q_s32(ptr %base) {
 ; CHECK-LABEL: test_vld1q_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  ret <4 x i32> %1
+  %0 = load <4 x i32>, ptr %base, align 4
+  ret <4 x i32> %0
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vld1q_u8(i8* %base) {
+define arm_aapcs_vfpcc <16 x i8> @test_vld1q_u8(ptr %base) {
 ; CHECK-LABEL: test_vld1q_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  ret <16 x i8> %1
+  %0 = load <16 x i8>, ptr %base, align 1
+  ret <16 x i8> %0
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vld1q_u16(i16* %base) {
+define arm_aapcs_vfpcc <8 x i16> @test_vld1q_u16(ptr %base) {
 ; CHECK-LABEL: test_vld1q_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  ret <8 x i16> %1
+  %0 = load <8 x i16>, ptr %base, align 2
+  ret <8 x i16> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vld1q_u32(i32* %base) {
+define arm_aapcs_vfpcc <4 x i32> @test_vld1q_u32(ptr %base) {
 ; CHECK-LABEL: test_vld1q_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  ret <4 x i32> %1
+  %0 = load <4 x i32>, ptr %base, align 4
+  ret <4 x i32> %0
 }
 
-define arm_aapcs_vfpcc <8 x half> @test_vld1q_z_f16(half* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x half> @test_vld1q_z_f16(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vld1q_z_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -97,18 +89,17 @@ define arm_aapcs_vfpcc <8 x half> @test_vld1q_z_f16(half* %base, i16 zeroext %p)
 ; CHECK-NEXT:    vldrht.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast half* %base to <8 x half>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  %3 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %2, <8 x half> zeroinitializer)
-  ret <8 x half> %3
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %base, i32 2, <8 x i1> %1, <8 x half> zeroinitializer)
+  ret <8 x half> %2
 }
 
 declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
 
-declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32 immarg, <8 x i1>, <8 x half>)
+declare <8 x half> @llvm.masked.load.v8f16.p0(ptr, i32 immarg, <8 x i1>, <8 x half>)
 
-define arm_aapcs_vfpcc <4 x float> @test_vld1q_z_f32(float* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x float> @test_vld1q_z_f32(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vld1q_z_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -116,18 +107,17 @@ define arm_aapcs_vfpcc <4 x float> @test_vld1q_z_f32(float* %base, i16 zeroext %
 ; CHECK-NEXT:    vldrwt.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast float* %base to <4 x float>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  %3 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %2, <4 x float> zeroinitializer)
-  ret <4 x float> %3
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %base, i32 4, <4 x i1> %1, <4 x float> zeroinitializer)
+  ret <4 x float> %2
 }
 
 declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
 
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
 
-define arm_aapcs_vfpcc <16 x i8> @test_vld1q_z_s8(i8* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <16 x i8> @test_vld1q_z_s8(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vld1q_z_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -135,18 +125,17 @@ define arm_aapcs_vfpcc <16 x i8> @test_vld1q_z_s8(i8* %base, i16 zeroext %p) {
 ; CHECK-NEXT:    vldrbt.u8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  %1 = zext i16 %p to i32
-  %2 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
-  %3 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %2, <16 x i8> zeroinitializer)
-  ret <16 x i8> %3
+  %0 = zext i16 %p to i32
+  %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %2 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %base, i32 1, <16 x i1> %1, <16 x i8> zeroinitializer)
+  ret <16 x i8> %2
 }
 
 declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32)
 
-declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>)
+declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32 immarg, <16 x i1>, <16 x i8>)
 
-define arm_aapcs_vfpcc <8 x i16> @test_vld1q_z_s16(i16* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vld1q_z_s16(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vld1q_z_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -154,16 +143,15 @@ define arm_aapcs_vfpcc <8 x i16> @test_vld1q_z_s16(i16* %base, i16 zeroext %p) {
 ; CHECK-NEXT:    vldrht.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  %3 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %2, <8 x i16> zeroinitializer)
-  ret <8 x i16> %3
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %base, i32 2, <8 x i1> %1, <8 x i16> zeroinitializer)
+  ret <8 x i16> %2
 }
 
-declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vld1q_z_s32(i32* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vld1q_z_s32(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vld1q_z_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -171,16 +159,15 @@ define arm_aapcs_vfpcc <4 x i32> @test_vld1q_z_s32(i32* %base, i16 zeroext %p) {
 ; CHECK-NEXT:    vldrwt.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  %3 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %2, <4 x i32> zeroinitializer)
-  ret <4 x i32> %3
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %base, i32 4, <4 x i1> %1, <4 x i32> zeroinitializer)
+  ret <4 x i32> %2
 }
 
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
 
-define arm_aapcs_vfpcc <16 x i8> @test_vld1q_z_u8(i8* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <16 x i8> @test_vld1q_z_u8(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vld1q_z_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -188,14 +175,13 @@ define arm_aapcs_vfpcc <16 x i8> @test_vld1q_z_u8(i8* %base, i16 zeroext %p) {
 ; CHECK-NEXT:    vldrbt.u8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  %1 = zext i16 %p to i32
-  %2 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
-  %3 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %2, <16 x i8> zeroinitializer)
-  ret <16 x i8> %3
+  %0 = zext i16 %p to i32
+  %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %2 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %base, i32 1, <16 x i1> %1, <16 x i8> zeroinitializer)
+  ret <16 x i8> %2
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vld1q_z_u16(i16* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vld1q_z_u16(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vld1q_z_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -203,14 +189,13 @@ define arm_aapcs_vfpcc <8 x i16> @test_vld1q_z_u16(i16* %base, i16 zeroext %p) {
 ; CHECK-NEXT:    vldrht.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  %3 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %2, <8 x i16> zeroinitializer)
-  ret <8 x i16> %3
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %base, i32 2, <8 x i1> %1, <8 x i16> zeroinitializer)
+  ret <8 x i16> %2
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vld1q_z_u32(i32* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vld1q_z_u32(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vld1q_z_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -218,84 +203,77 @@ define arm_aapcs_vfpcc <4 x i32> @test_vld1q_z_u32(i32* %base, i16 zeroext %p) {
 ; CHECK-NEXT:    vldrwt.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  %3 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %2, <4 x i32> zeroinitializer)
-  ret <4 x i32> %3
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %base, i32 4, <4 x i1> %1, <4 x i32> zeroinitializer)
+  ret <4 x i32> %2
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_s8(i8* %base) {
+define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_s8(ptr %base) {
 ; CHECK-LABEL: test_vldrbq_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  ret <16 x i8> %1
+  %0 = load <16 x i8>, ptr %base, align 1
+  ret <16 x i8> %0
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_s16(i8* %base) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_s16(ptr %base) {
 ; CHECK-LABEL: test_vldrbq_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  ret <8 x i16> %2
+  %0 = load <8 x i8>, ptr %base, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  ret <8 x i16> %1
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_s32(i8* %base) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_s32(ptr %base) {
 ; CHECK-LABEL: test_vldrbq_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  ret <4 x i32> %2
+  %0 = load <4 x i8>, ptr %base, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  ret <4 x i32> %1
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_u8(i8* %base) {
+define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_u8(ptr %base) {
 ; CHECK-LABEL: test_vldrbq_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  ret <16 x i8> %1
+  %0 = load <16 x i8>, ptr %base, align 1
+  ret <16 x i8> %0
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_u16(i8* %base) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_u16(ptr %base) {
 ; CHECK-LABEL: test_vldrbq_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  ret <8 x i16> %2
+  %0 = load <8 x i8>, ptr %base, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  ret <8 x i16> %1
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_u32(i8* %base) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_u32(ptr %base) {
 ; CHECK-LABEL: test_vldrbq_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  ret <4 x i32> %2
+  %0 = load <4 x i8>, ptr %base, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  ret <4 x i32> %1
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_z_s8(i8* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_z_s8(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrbq_z_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -303,14 +281,13 @@ define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_z_s8(i8* %base, i16 zeroext %p) {
 ; CHECK-NEXT:    vldrbt.u8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  %1 = zext i16 %p to i32
-  %2 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
-  %3 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %2, <16 x i8> zeroinitializer)
-  ret <16 x i8> %3
+  %0 = zext i16 %p to i32
+  %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %2 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %base, i32 1, <16 x i1> %1, <16 x i8> zeroinitializer)
+  ret <16 x i8> %2
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_z_s16(i8* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_z_s16(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrbq_z_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -318,17 +295,16 @@ define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_z_s16(i8* %base, i16 zeroext %p) {
 ; CHECK-NEXT:    vldrbt.s16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <8 x i8>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  %3 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %2, <8 x i8> zeroinitializer)
-  %4 = sext <8 x i8> %3 to <8 x i16>
-  ret <8 x i16> %4
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %base, i32 1, <8 x i1> %1, <8 x i8> zeroinitializer)
+  %3 = sext <8 x i8> %2 to <8 x i16>
+  ret <8 x i16> %3
 }
 
-declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
+declare <8 x i8> @llvm.masked.load.v8i8.p0(ptr, i32 immarg, <8 x i1>, <8 x i8>)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_z_s32(i8* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_z_s32(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrbq_z_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -336,17 +312,16 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_z_s32(i8* %base, i16 zeroext %p) {
 ; CHECK-NEXT:    vldrbt.s32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <4 x i8>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  %3 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %2, <4 x i8> zeroinitializer)
-  %4 = sext <4 x i8> %3 to <4 x i32>
-  ret <4 x i32> %4
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %base, i32 1, <4 x i1> %1, <4 x i8> zeroinitializer)
+  %3 = sext <4 x i8> %2 to <4 x i32>
+  ret <4 x i32> %3
 }
 
-declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>)
+declare <4 x i8> @llvm.masked.load.v4i8.p0(ptr, i32 immarg, <4 x i1>, <4 x i8>)
 
-define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_z_u8(i8* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_z_u8(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrbq_z_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -354,14 +329,13 @@ define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_z_u8(i8* %base, i16 zeroext %p) {
 ; CHECK-NEXT:    vldrbt.u8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  %1 = zext i16 %p to i32
-  %2 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
-  %3 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %2, <16 x i8> zeroinitializer)
-  ret <16 x i8> %3
+  %0 = zext i16 %p to i32
+  %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  %2 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %base, i32 1, <16 x i1> %1, <16 x i8> zeroinitializer)
+  ret <16 x i8> %2
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_z_u16(i8* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_z_u16(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrbq_z_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -369,15 +343,14 @@ define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_z_u16(i8* %base, i16 zeroext %p) {
 ; CHECK-NEXT:    vldrbt.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <8 x i8>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  %3 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %2, <8 x i8> zeroinitializer)
-  %4 = zext <8 x i8> %3 to <8 x i16>
-  ret <8 x i16> %4
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %base, i32 1, <8 x i1> %1, <8 x i8> zeroinitializer)
+  %3 = zext <8 x i8> %2 to <8 x i16>
+  ret <8 x i16> %3
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_z_u32(i8* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_z_u32(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrbq_z_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -385,72 +358,66 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_z_u32(i8* %base, i16 zeroext %p) {
 ; CHECK-NEXT:    vldrbt.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <4 x i8>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  %3 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %2, <4 x i8> zeroinitializer)
-  %4 = zext <4 x i8> %3 to <4 x i32>
-  ret <4 x i32> %4
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %base, i32 1, <4 x i1> %1, <4 x i8> zeroinitializer)
+  %3 = zext <4 x i8> %2 to <4 x i32>
+  ret <4 x i32> %3
 }
 
-define arm_aapcs_vfpcc <8 x half> @test_vldrhq_f16(half* %base) {
+define arm_aapcs_vfpcc <8 x half> @test_vldrhq_f16(ptr %base) {
 ; CHECK-LABEL: test_vldrhq_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast half* %base to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  ret <8 x half> %1
+  %0 = load <8 x half>, ptr %base, align 2
+  ret <8 x half> %0
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_s16(i16* %base) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_s16(ptr %base) {
 ; CHECK-LABEL: test_vldrhq_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  ret <8 x i16> %1
+  %0 = load <8 x i16>, ptr %base, align 2
+  ret <8 x i16> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_s32(i16* %base) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_s32(ptr %base) {
 ; CHECK-LABEL: test_vldrhq_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  ret <4 x i32> %2
+  %0 = load <4 x i16>, ptr %base, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  ret <4 x i32> %1
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_u16(i16* %base) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_u16(ptr %base) {
 ; CHECK-LABEL: test_vldrhq_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  ret <8 x i16> %1
+  %0 = load <8 x i16>, ptr %base, align 2
+  ret <8 x i16> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_u32(i16* %base) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_u32(ptr %base) {
 ; CHECK-LABEL: test_vldrhq_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  ret <4 x i32> %2
+  %0 = load <4 x i16>, ptr %base, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  ret <4 x i32> %1
 }
 
-define arm_aapcs_vfpcc <8 x half> @test_vldrhq_z_f16(half* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x half> @test_vldrhq_z_f16(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_z_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -458,14 +425,13 @@ define arm_aapcs_vfpcc <8 x half> @test_vldrhq_z_f16(half* %base, i16 zeroext %p
 ; CHECK-NEXT:    vldrht.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast half* %base to <8 x half>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  %3 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %2, <8 x half> zeroinitializer)
-  ret <8 x half> %3
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %base, i32 2, <8 x i1> %1, <8 x half> zeroinitializer)
+  ret <8 x half> %2
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_z_s16(i16* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_z_s16(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_z_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -473,14 +439,13 @@ define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_z_s16(i16* %base, i16 zeroext %p)
 ; CHECK-NEXT:    vldrht.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  %3 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %2, <8 x i16> zeroinitializer)
-  ret <8 x i16> %3
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %base, i32 2, <8 x i1> %1, <8 x i16> zeroinitializer)
+  ret <8 x i16> %2
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_z_s32(i16* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_z_s32(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_z_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -488,17 +453,16 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_z_s32(i16* %base, i16 zeroext %p)
 ; CHECK-NEXT:    vldrht.s32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <4 x i16>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  %3 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %2, <4 x i16> zeroinitializer)
-  %4 = sext <4 x i16> %3 to <4 x i32>
-  ret <4 x i32> %4
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %base, i32 2, <4 x i1> %1, <4 x i16> zeroinitializer)
+  %3 = sext <4 x i16> %2 to <4 x i32>
+  ret <4 x i32> %3
 }
 
-declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32 immarg, <4 x i1>, <4 x i16>)
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_z_u16(i16* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_z_u16(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_z_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -506,14 +470,13 @@ define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_z_u16(i16* %base, i16 zeroext %p)
 ; CHECK-NEXT:    vldrht.u16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  %3 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %2, <8 x i16> zeroinitializer)
-  ret <8 x i16> %3
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  %2 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %base, i32 2, <8 x i1> %1, <8 x i16> zeroinitializer)
+  ret <8 x i16> %2
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_z_u32(i16* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_z_u32(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_z_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -521,48 +484,44 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_z_u32(i16* %base, i16 zeroext %p)
 ; CHECK-NEXT:    vldrht.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <4 x i16>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  %3 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %2, <4 x i16> zeroinitializer)
-  %4 = zext <4 x i16> %3 to <4 x i32>
-  ret <4 x i32> %4
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %base, i32 2, <4 x i1> %1, <4 x i16> zeroinitializer)
+  %3 = zext <4 x i16> %2 to <4 x i32>
+  ret <4 x i32> %3
 }
 
-define arm_aapcs_vfpcc <4 x float> @test_vldrwq_f32(float* %base) {
+define arm_aapcs_vfpcc <4 x float> @test_vldrwq_f32(ptr %base) {
 ; CHECK-LABEL: test_vldrwq_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast float* %base to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  ret <4 x float> %1
+  %0 = load <4 x float>, ptr %base, align 4
+  ret <4 x float> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_s32(i32* %base) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_s32(ptr %base) {
 ; CHECK-LABEL: test_vldrwq_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  ret <4 x i32> %1
+  %0 = load <4 x i32>, ptr %base, align 4
+  ret <4 x i32> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_u32(i32* %base) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_u32(ptr %base) {
 ; CHECK-LABEL: test_vldrwq_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  ret <4 x i32> %1
+  %0 = load <4 x i32>, ptr %base, align 4
+  ret <4 x i32> %0
 }
 
-define arm_aapcs_vfpcc <4 x float> @test_vldrwq_z_f32(float* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x float> @test_vldrwq_z_f32(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrwq_z_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -570,14 +529,13 @@ define arm_aapcs_vfpcc <4 x float> @test_vldrwq_z_f32(float* %base, i16 zeroext
 ; CHECK-NEXT:    vldrwt.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast float* %base to <4 x float>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  %3 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %2, <4 x float> zeroinitializer)
-  ret <4 x float> %3
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %base, i32 4, <4 x i1> %1, <4 x float> zeroinitializer)
+  ret <4 x float> %2
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_z_s32(i32* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_z_s32(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrwq_z_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -585,14 +543,13 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_z_s32(i32* %base, i16 zeroext %p)
 ; CHECK-NEXT:    vldrwt.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  %3 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %2, <4 x i32> zeroinitializer)
-  ret <4 x i32> %3
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %base, i32 4, <4 x i1> %1, <4 x i32> zeroinitializer)
+  ret <4 x i32> %2
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_z_u32(i32* %base, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_z_u32(ptr %base, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrwq_z_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -600,102 +557,93 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_z_u32(i32* %base, i16 zeroext %p)
 ; CHECK-NEXT:    vldrwt.u32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  %3 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %2, <4 x i32> zeroinitializer)
-  ret <4 x i32> %3
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  %2 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %base, i32 4, <4 x i1> %1, <4 x i32> zeroinitializer)
+  ret <4 x i32> %2
 }
 
-define arm_aapcs_vfpcc void @test_vst1q_f16(half* %base, <8 x half> %value) {
+define arm_aapcs_vfpcc void @test_vst1q_f16(ptr %base, <8 x half> %value) {
 ; CHECK-LABEL: test_vst1q_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast half* %base to <8 x half>*
-  store <8 x half> %value, <8 x half>* %0, align 2
+  store <8 x half> %value, ptr %base, align 2
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vst1q_f32(float* %base, <4 x float> %value) {
+define arm_aapcs_vfpcc void @test_vst1q_f32(ptr %base, <4 x float> %value) {
 ; CHECK-LABEL: test_vst1q_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast float* %base to <4 x float>*
-  store <4 x float> %value, <4 x float>* %0, align 4
+  store <4 x float> %value, ptr %base, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vst1q_s8(i8* %base, <16 x i8> %value) {
+define arm_aapcs_vfpcc void @test_vst1q_s8(ptr %base, <16 x i8> %value) {
 ; CHECK-LABEL: test_vst1q_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  store <16 x i8> %value, <16 x i8>* %0, align 1
+  store <16 x i8> %value, ptr %base, align 1
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vst1q_s16(i16* %base, <8 x i16> %value) {
+define arm_aapcs_vfpcc void @test_vst1q_s16(ptr %base, <8 x i16> %value) {
 ; CHECK-LABEL: test_vst1q_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  store <8 x i16> %value, <8 x i16>* %0, align 2
+  store <8 x i16> %value, ptr %base, align 2
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vst1q_s32(i32* %base, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vst1q_s32(ptr %base, <4 x i32> %value) {
 ; CHECK-LABEL: test_vst1q_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  store <4 x i32> %value, <4 x i32>* %0, align 4
+  store <4 x i32> %value, ptr %base, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vst1q_u8(i8* %base, <16 x i8> %value) {
+define arm_aapcs_vfpcc void @test_vst1q_u8(ptr %base, <16 x i8> %value) {
 ; CHECK-LABEL: test_vst1q_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  store <16 x i8> %value, <16 x i8>* %0, align 1
+  store <16 x i8> %value, ptr %base, align 1
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vst1q_u16(i16* %base, <8 x i16> %value) {
+define arm_aapcs_vfpcc void @test_vst1q_u16(ptr %base, <8 x i16> %value) {
 ; CHECK-LABEL: test_vst1q_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  store <8 x i16> %value, <8 x i16>* %0, align 2
+  store <8 x i16> %value, ptr %base, align 2
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vst1q_u32(i32* %base, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vst1q_u32(ptr %base, <4 x i32> %value) {
 ; CHECK-LABEL: test_vst1q_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  store <4 x i32> %value, <4 x i32>* %0, align 4
+  store <4 x i32> %value, ptr %base, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vst1q_p_f16(half* %base, <8 x half> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vst1q_p_f16(ptr %base, <8 x half> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vst1q_p_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -703,16 +651,15 @@ define arm_aapcs_vfpcc void @test_vst1q_p_f16(half* %base, <8 x half> %value, i1
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast half* %base to <8 x half>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %value, <8 x half>* %0, i32 2, <8 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %value, ptr %base, i32 2, <8 x i1> %1)
   ret void
 }
 
-declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32 immarg, <8 x i1>)
+declare void @llvm.masked.store.v8f16.p0(<8 x half>, ptr, i32 immarg, <8 x i1>)
 
-define arm_aapcs_vfpcc void @test_vst1q_p_f32(float* %base, <4 x float> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vst1q_p_f32(ptr %base, <4 x float> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vst1q_p_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -720,16 +667,15 @@ define arm_aapcs_vfpcc void @test_vst1q_p_f32(float* %base, <4 x float> %value,
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast float* %base to <4 x float>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %value, <4 x float>* %0, i32 4, <4 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %value, ptr %base, i32 4, <4 x i1> %1)
   ret void
 }
 
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)
 
-define arm_aapcs_vfpcc void @test_vst1q_p_s8(i8* %base, <16 x i8> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vst1q_p_s8(ptr %base, <16 x i8> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vst1q_p_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -737,16 +683,15 @@ define arm_aapcs_vfpcc void @test_vst1q_p_s8(i8* %base, <16 x i8> %value, i16 ze
 ; CHECK-NEXT:    vstrbt.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  %1 = zext i16 %p to i32
-  %2 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %value, <16 x i8>* %0, i32 1, <16 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %value, ptr %base, i32 1, <16 x i1> %1)
   ret void
 }
 
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32 immarg, <16 x i1>)
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32 immarg, <16 x i1>)
 
-define arm_aapcs_vfpcc void @test_vst1q_p_s16(i16* %base, <8 x i16> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vst1q_p_s16(ptr %base, <8 x i16> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vst1q_p_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -754,16 +699,15 @@ define arm_aapcs_vfpcc void @test_vst1q_p_s16(i16* %base, <8 x i16> %value, i16
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %value, <8 x i16>* %0, i32 2, <8 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %value, ptr %base, i32 2, <8 x i1> %1)
   ret void
 }
 
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>)
 
-define arm_aapcs_vfpcc void @test_vst1q_p_s32(i32* %base, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vst1q_p_s32(ptr %base, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vst1q_p_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -771,16 +715,15 @@ define arm_aapcs_vfpcc void @test_vst1q_p_s32(i32* %base, <4 x i32> %value, i16
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %value, <4 x i32>* %0, i32 4, <4 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %value, ptr %base, i32 4, <4 x i1> %1)
   ret void
 }
 
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
 
-define arm_aapcs_vfpcc void @test_vst1q_p_u8(i8* %base, <16 x i8> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vst1q_p_u8(ptr %base, <16 x i8> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vst1q_p_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -788,14 +731,13 @@ define arm_aapcs_vfpcc void @test_vst1q_p_u8(i8* %base, <16 x i8> %value, i16 ze
 ; CHECK-NEXT:    vstrbt.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  %1 = zext i16 %p to i32
-  %2 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %value, <16 x i8>* %0, i32 1, <16 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %value, ptr %base, i32 1, <16 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vst1q_p_u16(i16* %base, <8 x i16> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vst1q_p_u16(ptr %base, <8 x i16> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vst1q_p_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -803,14 +745,13 @@ define arm_aapcs_vfpcc void @test_vst1q_p_u16(i16* %base, <8 x i16> %value, i16
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %value, <8 x i16>* %0, i32 2, <8 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %value, ptr %base, i32 2, <8 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vst1q_p_u32(i32* %base, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vst1q_p_u32(ptr %base, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vst1q_p_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -818,84 +759,77 @@ define arm_aapcs_vfpcc void @test_vst1q_p_u32(i32* %base, <4 x i32> %value, i16
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %value, <4 x i32>* %0, i32 4, <4 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %value, ptr %base, i32 4, <4 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_s8(i8* %base, <16 x i8> %value) {
+define arm_aapcs_vfpcc void @test_vstrbq_s8(ptr %base, <16 x i8> %value) {
 ; CHECK-LABEL: test_vstrbq_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  store <16 x i8> %value, <16 x i8>* %0, align 1
+  store <16 x i8> %value, ptr %base, align 1
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_s16(i8* %base, <8 x i16> %value) {
+define arm_aapcs_vfpcc void @test_vstrbq_s16(ptr %base, <8 x i16> %value) {
 ; CHECK-LABEL: test_vstrbq_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = trunc <8 x i16> %value to <8 x i8>
-  %1 = bitcast i8* %base to <8 x i8>*
-  store <8 x i8> %0, <8 x i8>* %1, align 1
+  store <8 x i8> %0, ptr %base, align 1
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_s32(i8* %base, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrbq_s32(ptr %base, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrbq_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = trunc <4 x i32> %value to <4 x i8>
-  %1 = bitcast i8* %base to <4 x i8>*
-  store <4 x i8> %0, <4 x i8>* %1, align 1
+  store <4 x i8> %0, ptr %base, align 1
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_u8(i8* %base, <16 x i8> %value) {
+define arm_aapcs_vfpcc void @test_vstrbq_u8(ptr %base, <16 x i8> %value) {
 ; CHECK-LABEL: test_vstrbq_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  store <16 x i8> %value, <16 x i8>* %0, align 1
+  store <16 x i8> %value, ptr %base, align 1
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_u16(i8* %base, <8 x i16> %value) {
+define arm_aapcs_vfpcc void @test_vstrbq_u16(ptr %base, <8 x i16> %value) {
 ; CHECK-LABEL: test_vstrbq_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = trunc <8 x i16> %value to <8 x i8>
-  %1 = bitcast i8* %base to <8 x i8>*
-  store <8 x i8> %0, <8 x i8>* %1, align 1
+  store <8 x i8> %0, ptr %base, align 1
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_u32(i8* %base, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrbq_u32(ptr %base, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrbq_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = trunc <4 x i32> %value to <4 x i8>
-  %1 = bitcast i8* %base to <4 x i8>*
-  store <4 x i8> %0, <4 x i8>* %1, align 1
+  store <4 x i8> %0, ptr %base, align 1
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_p_s8(i8* %base, <16 x i8> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrbq_p_s8(ptr %base, <16 x i8> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrbq_p_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -903,14 +837,13 @@ define arm_aapcs_vfpcc void @test_vstrbq_p_s8(i8* %base, <16 x i8> %value, i16 z
 ; CHECK-NEXT:    vstrbt.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  %1 = zext i16 %p to i32
-  %2 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %value, <16 x i8>* %0, i32 1, <16 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %value, ptr %base, i32 1, <16 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_p_s16(i8* %base, <8 x i16> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrbq_p_s16(ptr %base, <8 x i16> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrbq_p_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -919,16 +852,15 @@ define arm_aapcs_vfpcc void @test_vstrbq_p_s16(i8* %base, <8 x i16> %value, i16
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = trunc <8 x i16> %value to <8 x i8>
-  %1 = bitcast i8* %base to <8 x i8>*
-  %2 = zext i16 %p to i32
-  %3 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %2)
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %0, <8 x i8>* %1, i32 1, <8 x i1> %3)
+  %1 = zext i16 %p to i32
+  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %base, i32 1, <8 x i1> %2)
   ret void
 }
 
-declare void @llvm.masked.store.v8i8.p0v8i8(<8 x i8>, <8 x i8>*, i32 immarg, <8 x i1>)
+declare void @llvm.masked.store.v8i8.p0(<8 x i8>, ptr, i32 immarg, <8 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrbq_p_s32(i8* %base, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrbq_p_s32(ptr %base, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrbq_p_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -937,16 +869,15 @@ define arm_aapcs_vfpcc void @test_vstrbq_p_s32(i8* %base, <4 x i32> %value, i16
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = trunc <4 x i32> %value to <4 x i8>
-  %1 = bitcast i8* %base to <4 x i8>*
-  %2 = zext i16 %p to i32
-  %3 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2)
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %0, <4 x i8>* %1, i32 1, <4 x i1> %3)
+  %1 = zext i16 %p to i32
+  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %base, i32 1, <4 x i1> %2)
   ret void
 }
 
-declare void @llvm.masked.store.v4i8.p0v4i8(<4 x i8>, <4 x i8>*, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v4i8.p0(<4 x i8>, ptr, i32 immarg, <4 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrbq_p_u8(i8* %base, <16 x i8> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrbq_p_u8(ptr %base, <16 x i8> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrbq_p_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -954,14 +885,13 @@ define arm_aapcs_vfpcc void @test_vstrbq_p_u8(i8* %base, <16 x i8> %value, i16 z
 ; CHECK-NEXT:    vstrbt.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %base to <16 x i8>*
-  %1 = zext i16 %p to i32
-  %2 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %value, <16 x i8>* %0, i32 1, <16 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %value, ptr %base, i32 1, <16 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_p_u16(i8* %base, <8 x i16> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrbq_p_u16(ptr %base, <8 x i16> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrbq_p_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -970,14 +900,13 @@ define arm_aapcs_vfpcc void @test_vstrbq_p_u16(i8* %base, <8 x i16> %value, i16
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = trunc <8 x i16> %value to <8 x i8>
-  %1 = bitcast i8* %base to <8 x i8>*
-  %2 = zext i16 %p to i32
-  %3 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %2)
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %0, <8 x i8>* %1, i32 1, <8 x i1> %3)
+  %1 = zext i16 %p to i32
+  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %base, i32 1, <8 x i1> %2)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_p_u32(i8* %base, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrbq_p_u32(ptr %base, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrbq_p_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -986,71 +915,65 @@ define arm_aapcs_vfpcc void @test_vstrbq_p_u32(i8* %base, <4 x i32> %value, i16
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = trunc <4 x i32> %value to <4 x i8>
-  %1 = bitcast i8* %base to <4 x i8>*
-  %2 = zext i16 %p to i32
-  %3 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2)
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %0, <4 x i8>* %1, i32 1, <4 x i1> %3)
+  %1 = zext i16 %p to i32
+  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %base, i32 1, <4 x i1> %2)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_f16(half* %base, <8 x half> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_f16(ptr %base, <8 x half> %value) {
 ; CHECK-LABEL: test_vstrhq_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast half* %base to <8 x half>*
-  store <8 x half> %value, <8 x half>* %0, align 2
+  store <8 x half> %value, ptr %base, align 2
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_s16(i16* %base, <8 x i16> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_s16(ptr %base, <8 x i16> %value) {
 ; CHECK-LABEL: test_vstrhq_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  store <8 x i16> %value, <8 x i16>* %0, align 2
+  store <8 x i16> %value, ptr %base, align 2
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_s32(i16* %base, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_s32(ptr %base, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrhq_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = trunc <4 x i32> %value to <4 x i16>
-  %1 = bitcast i16* %base to <4 x i16>*
-  store <4 x i16> %0, <4 x i16>* %1, align 2
+  store <4 x i16> %0, ptr %base, align 2
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_u16(i16* %base, <8 x i16> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_u16(ptr %base, <8 x i16> %value) {
 ; CHECK-LABEL: test_vstrhq_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  store <8 x i16> %value, <8 x i16>* %0, align 2
+  store <8 x i16> %value, ptr %base, align 2
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_u32(i16* %base, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_u32(ptr %base, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrhq_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = trunc <4 x i32> %value to <4 x i16>
-  %1 = bitcast i16* %base to <4 x i16>*
-  store <4 x i16> %0, <4 x i16>* %1, align 2
+  store <4 x i16> %0, ptr %base, align 2
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_p_f16(half* %base, <8 x half> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_p_f16(ptr %base, <8 x half> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_p_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1058,14 +981,13 @@ define arm_aapcs_vfpcc void @test_vstrhq_p_f16(half* %base, <8 x half> %value, i
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast half* %base to <8 x half>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %value, <8 x half>* %0, i32 2, <8 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %value, ptr %base, i32 2, <8 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_p_s16(i16* %base, <8 x i16> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_p_s16(ptr %base, <8 x i16> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_p_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1073,14 +995,13 @@ define arm_aapcs_vfpcc void @test_vstrhq_p_s16(i16* %base, <8 x i16> %value, i16
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %value, <8 x i16>* %0, i32 2, <8 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %value, ptr %base, i32 2, <8 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_p_s32(i16* %base, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_p_s32(ptr %base, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_p_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1089,16 +1010,15 @@ define arm_aapcs_vfpcc void @test_vstrhq_p_s32(i16* %base, <4 x i32> %value, i16
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = trunc <4 x i32> %value to <4 x i16>
-  %1 = bitcast i16* %base to <4 x i16>*
-  %2 = zext i16 %p to i32
-  %3 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2)
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %0, <4 x i16>* %1, i32 2, <4 x i1> %3)
+  %1 = zext i16 %p to i32
+  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %base, i32 2, <4 x i1> %2)
   ret void
 }
 
-declare void @llvm.masked.store.v4i16.p0v4i16(<4 x i16>, <4 x i16>*, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v4i16.p0(<4 x i16>, ptr, i32 immarg, <4 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrhq_p_u16(i16* %base, <8 x i16> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_p_u16(ptr %base, <8 x i16> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_p_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1106,14 +1026,13 @@ define arm_aapcs_vfpcc void @test_vstrhq_p_u16(i16* %base, <8 x i16> %value, i16
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %base to <8 x i16>*
-  %1 = zext i16 %p to i32
-  %2 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %value, <8 x i16>* %0, i32 2, <8 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %value, ptr %base, i32 2, <8 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_p_u32(i16* %base, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_p_u32(ptr %base, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_p_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1122,47 +1041,43 @@ define arm_aapcs_vfpcc void @test_vstrhq_p_u32(i16* %base, <4 x i32> %value, i16
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = trunc <4 x i32> %value to <4 x i16>
-  %1 = bitcast i16* %base to <4 x i16>*
-  %2 = zext i16 %p to i32
-  %3 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2)
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %0, <4 x i16>* %1, i32 2, <4 x i1> %3)
+  %1 = zext i16 %p to i32
+  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %base, i32 2, <4 x i1> %2)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_f32(float* %base, <4 x float> %value) {
+define arm_aapcs_vfpcc void @test_vstrwq_f32(ptr %base, <4 x float> %value) {
 ; CHECK-LABEL: test_vstrwq_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast float* %base to <4 x float>*
-  store <4 x float> %value, <4 x float>* %0, align 4
+  store <4 x float> %value, ptr %base, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_s32(i32* %base, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrwq_s32(ptr %base, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrwq_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  store <4 x i32> %value, <4 x i32>* %0, align 4
+  store <4 x i32> %value, ptr %base, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_u32(i32* %base, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrwq_u32(ptr %base, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrwq_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  store <4 x i32> %value, <4 x i32>* %0, align 4
+  store <4 x i32> %value, ptr %base, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_p_f32(float* %base, <4 x float> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrwq_p_f32(ptr %base, <4 x float> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrwq_p_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1170,14 +1085,13 @@ define arm_aapcs_vfpcc void @test_vstrwq_p_f32(float* %base, <4 x float> %value,
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast float* %base to <4 x float>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %value, <4 x float>* %0, i32 4, <4 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %value, ptr %base, i32 4, <4 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_p_s32(i32* %base, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrwq_p_s32(ptr %base, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrwq_p_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1185,14 +1099,13 @@ define arm_aapcs_vfpcc void @test_vstrwq_p_s32(i32* %base, <4 x i32> %value, i16
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %value, <4 x i32>* %0, i32 4, <4 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %value, ptr %base, i32 4, <4 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_p_u32(i32* %base, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrwq_p_u32(ptr %base, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrwq_p_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1200,9 +1113,8 @@ define arm_aapcs_vfpcc void @test_vstrwq_p_u32(i32* %base, <4 x i32> %value, i16
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %base to <4 x i32>*
-  %1 = zext i16 %p to i32
-  %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %value, <4 x i32>* %0, i32 4, <4 x i1> %2)
+  %0 = zext i16 %p to i32
+  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %value, ptr %base, i32 4, <4 x i1> %1)
   ret void
 }
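
Note: every hunk in this file follows the same mechanical rewrite. Under
opaque pointers a typed pointer bitcast is a no-op, so the
"bitcast ... to <N x T>*" feeding each load, store, or masked intrinsic is
deleted, the memory operation takes ptr %base directly, and the remaining
SSA values are renumbered down by one. The masked intrinsics also drop the
pointee type from their mangled names (e.g.
@llvm.masked.load.v16i8.p0v16i8 becomes @llvm.masked.load.v16i8.p0), and
the gather/scatter intrinsics in the next file drop it the same way
(.p0i8. becomes .p0.). A minimal sketch of the pattern, using a
hypothetical function name that is not part of this commit:

; Before (typed pointers):
define <4 x i32> @example(i32* %base) {
  %0 = bitcast i32* %base to <4 x i32>*
  %1 = load <4 x i32>, <4 x i32>* %0, align 4
  ret <4 x i32> %1
}

; After (opaque pointers): the bitcast is gone and %base is used as-is.
define <4 x i32> @example(ptr %base) {
  %0 = load <4 x i32>, ptr %base, align 4
  ret <4 x i32> %0
}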

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/scatter-gather.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/scatter-gather.ll
index c3d9b0fa452e1..d4022de7a5e71 100644
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/scatter-gather.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/scatter-gather.ll
@@ -1,79 +1,79 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_gather_offset_s16(i8* %base, <8 x i16> %offset) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_gather_offset_s16(ptr %base, <8 x i16> %offset) {
 ; CHECK-LABEL: test_vldrbq_gather_offset_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0i8.v8i16(i8* %base, <8 x i16> %offset, i32 8, i32 0, i32 0)
+  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0.v8i16(ptr %base, <8 x i16> %offset, i32 8, i32 0, i32 0)
   ret <8 x i16> %0
 }
 
-declare <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0i8.v8i16(i8*, <8 x i16>, i32, i32, i32)
+declare <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0.v8i16(ptr, <8 x i16>, i32, i32, i32)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_gather_offset_s32(i8* %base, <4 x i32> %offset) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_gather_offset_s32(ptr %base, <4 x i32> %offset) {
 ; CHECK-LABEL: test_vldrbq_gather_offset_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i8.v4i32(i8* %base, <4 x i32> %offset, i32 8, i32 0, i32 0)
+  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr %base, <4 x i32> %offset, i32 8, i32 0, i32 0)
   ret <4 x i32> %0
 }
 
-declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i8.v4i32(i8*, <4 x i32>, i32, i32, i32)
+declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr, <4 x i32>, i32, i32, i32)
 
-define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_gather_offset_s8(i8* %base, <16 x i8> %offset) {
+define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_gather_offset_s8(ptr %base, <16 x i8> %offset) {
 ; CHECK-LABEL: test_vldrbq_gather_offset_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vldr.gather.offset.v16i8.p0i8.v16i8(i8* %base, <16 x i8> %offset, i32 8, i32 0, i32 0)
+  %0 = call <16 x i8> @llvm.arm.mve.vldr.gather.offset.v16i8.p0.v16i8(ptr %base, <16 x i8> %offset, i32 8, i32 0, i32 0)
   ret <16 x i8> %0
 }
 
-declare <16 x i8> @llvm.arm.mve.vldr.gather.offset.v16i8.p0i8.v16i8(i8*, <16 x i8>, i32, i32, i32)
+declare <16 x i8> @llvm.arm.mve.vldr.gather.offset.v16i8.p0.v16i8(ptr, <16 x i8>, i32, i32, i32)
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_gather_offset_u16(i8* %base, <8 x i16> %offset) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_gather_offset_u16(ptr %base, <8 x i16> %offset) {
 ; CHECK-LABEL: test_vldrbq_gather_offset_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0i8.v8i16(i8* %base, <8 x i16> %offset, i32 8, i32 0, i32 1)
+  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0.v8i16(ptr %base, <8 x i16> %offset, i32 8, i32 0, i32 1)
   ret <8 x i16> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_gather_offset_u32(i8* %base, <4 x i32> %offset) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_gather_offset_u32(ptr %base, <4 x i32> %offset) {
 ; CHECK-LABEL: test_vldrbq_gather_offset_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i8.v4i32(i8* %base, <4 x i32> %offset, i32 8, i32 0, i32 1)
+  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr %base, <4 x i32> %offset, i32 8, i32 0, i32 1)
   ret <4 x i32> %0
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_gather_offset_u8(i8* %base, <16 x i8> %offset) {
+define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_gather_offset_u8(ptr %base, <16 x i8> %offset) {
 ; CHECK-LABEL: test_vldrbq_gather_offset_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vldr.gather.offset.v16i8.p0i8.v16i8(i8* %base, <16 x i8> %offset, i32 8, i32 0, i32 1)
+  %0 = call <16 x i8> @llvm.arm.mve.vldr.gather.offset.v16i8.p0.v16i8(ptr %base, <16 x i8> %offset, i32 8, i32 0, i32 1)
   ret <16 x i8> %0
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_gather_offset_z_s16(i8* %base, <8 x i16> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_gather_offset_z_s16(ptr %base, <8 x i16> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrbq_gather_offset_z_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -84,15 +84,15 @@ define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_gather_offset_z_s16(i8* %base, <8
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0i8.v8i16.v8i1(i8* %base, <8 x i16> %offset, i32 8, i32 0, i32 0, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0.v8i16.v8i1(ptr %base, <8 x i16> %offset, i32 8, i32 0, i32 0, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
 declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
 
-declare <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0i8.v8i16.v8i1(i8*, <8 x i16>, i32, i32, i32, <8 x i1>)
+declare <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0.v8i16.v8i1(ptr, <8 x i16>, i32, i32, i32, <8 x i1>)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_gather_offset_z_s32(i8* %base, <4 x i32> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_gather_offset_z_s32(ptr %base, <4 x i32> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrbq_gather_offset_z_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -103,15 +103,15 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_gather_offset_z_s32(i8* %base, <4
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i8.v4i32.v4i1(i8* %base, <4 x i32> %offset, i32 8, i32 0, i32 0, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0.v4i32.v4i1(ptr %base, <4 x i32> %offset, i32 8, i32 0, i32 0, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
 declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
 
-declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i8.v4i32.v4i1(i8*, <4 x i32>, i32, i32, i32, <4 x i1>)
+declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0.v4i32.v4i1(ptr, <4 x i32>, i32, i32, i32, <4 x i1>)
 
-define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_gather_offset_z_s8(i8* %base, <16 x i8> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_gather_offset_z_s8(ptr %base, <16 x i8> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrbq_gather_offset_z_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -122,15 +122,15 @@ define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_gather_offset_z_s8(i8* %base, <16
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vldr.gather.offset.predicated.v16i8.p0i8.v16i8.v16i1(i8* %base, <16 x i8> %offset, i32 8, i32 0, i32 0, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vldr.gather.offset.predicated.v16i8.p0.v16i8.v16i1(ptr %base, <16 x i8> %offset, i32 8, i32 0, i32 0, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
 declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32)
 
-declare <16 x i8> @llvm.arm.mve.vldr.gather.offset.predicated.v16i8.p0i8.v16i8.v16i1(i8*, <16 x i8>, i32, i32, i32, <16 x i1>)
+declare <16 x i8> @llvm.arm.mve.vldr.gather.offset.predicated.v16i8.p0.v16i8.v16i1(ptr, <16 x i8>, i32, i32, i32, <16 x i1>)
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_gather_offset_z_u16(i8* %base, <8 x i16> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_gather_offset_z_u16(ptr %base, <8 x i16> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrbq_gather_offset_z_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -141,11 +141,11 @@ define arm_aapcs_vfpcc <8 x i16> @test_vldrbq_gather_offset_z_u16(i8* %base, <8
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0i8.v8i16.v8i1(i8* %base, <8 x i16> %offset, i32 8, i32 0, i32 1, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0.v8i16.v8i1(ptr %base, <8 x i16> %offset, i32 8, i32 0, i32 1, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_gather_offset_z_u32(i8* %base, <4 x i32> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_gather_offset_z_u32(ptr %base, <4 x i32> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrbq_gather_offset_z_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -156,11 +156,11 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrbq_gather_offset_z_u32(i8* %base, <4
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i8.v4i32.v4i1(i8* %base, <4 x i32> %offset, i32 8, i32 0, i32 1, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0.v4i32.v4i1(ptr %base, <4 x i32> %offset, i32 8, i32 0, i32 1, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_gather_offset_z_u8(i8* %base, <16 x i8> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_gather_offset_z_u8(ptr %base, <16 x i8> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrbq_gather_offset_z_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -171,7 +171,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vldrbq_gather_offset_z_u8(i8* %base, <16
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vldr.gather.offset.predicated.v16i8.p0i8.v16i8.v16i1(i8* %base, <16 x i8> %offset, i32 8, i32 0, i32 1, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vldr.gather.offset.predicated.v16i8.p0.v16i8.v16i1(ptr %base, <16 x i8> %offset, i32 8, i32 0, i32 1, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -199,7 +199,7 @@ entry:
   ret <2 x i64> %0
 }
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_s64(<2 x i64>* %addr) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_s64(ptr %addr) {
 ; CHECK-LABEL: test_vldrdq_gather_base_wb_s64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -207,17 +207,17 @@ define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_s64(<2 x i64>* %add
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
+  %0 = load <2 x i64>, ptr %addr, align 8
   %1 = call { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.v2i64.v2i64(<2 x i64> %0, i32 576)
   %2 = extractvalue { <2 x i64>, <2 x i64> } %1, 1
-  store <2 x i64> %2, <2 x i64>* %addr, align 8
+  store <2 x i64> %2, ptr %addr, align 8
   %3 = extractvalue { <2 x i64>, <2 x i64> } %1, 0
   ret <2 x i64> %3
 }
 
 declare { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.v2i64.v2i64(<2 x i64>, i32)
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_u64(<2 x i64>* %addr) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_u64(ptr %addr) {
 ; CHECK-LABEL: test_vldrdq_gather_base_wb_u64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -225,15 +225,15 @@ define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_u64(<2 x i64>* %add
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
+  %0 = load <2 x i64>, ptr %addr, align 8
   %1 = call { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.v2i64.v2i64(<2 x i64> %0, i32 -328)
   %2 = extractvalue { <2 x i64>, <2 x i64> } %1, 1
-  store <2 x i64> %2, <2 x i64>* %addr, align 8
+  store <2 x i64> %2, ptr %addr, align 8
   %3 = extractvalue { <2 x i64>, <2 x i64> } %1, 0
   ret <2 x i64> %3
 }
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_z_s64(<2 x i64>* %addr, i16 zeroext %p) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_z_s64(ptr %addr, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrdq_gather_base_wb_z_s64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -243,12 +243,12 @@ define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_z_s64(<2 x i64>* %a
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
+  %0 = load <2 x i64>, ptr %addr, align 8
   %1 = zext i16 %p to i32
   %2 = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %1)
   %3 = call { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v2i1(<2 x i64> %0, i32 664, <2 x i1> %2)
   %4 = extractvalue { <2 x i64>, <2 x i64> } %3, 1
-  store <2 x i64> %4, <2 x i64>* %addr, align 8
+  store <2 x i64> %4, ptr %addr, align 8
   %5 = extractvalue { <2 x i64>, <2 x i64> } %3, 0
   ret <2 x i64> %5
 }
@@ -256,7 +256,7 @@ entry:
 declare <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32)
 declare { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v2i1(<2 x i64>, i32, <2 x i1>)
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_z_u64(<2 x i64>* %addr, i16 zeroext %p) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_z_u64(ptr %addr, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrdq_gather_base_wb_z_u64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -266,12 +266,12 @@ define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_z_u64(<2 x i64>* %a
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
+  %0 = load <2 x i64>, ptr %addr, align 8
   %1 = zext i16 %p to i32
   %2 = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %1)
   %3 = call { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v2i1(<2 x i64> %0, i32 656, <2 x i1> %2)
   %4 = extractvalue { <2 x i64>, <2 x i64> } %3, 1
-  store <2 x i64> %4, <2 x i64>* %addr, align 8
+  store <2 x i64> %4, ptr %addr, align 8
   %5 = extractvalue { <2 x i64>, <2 x i64> } %3, 0
   ret <2 x i64> %5
 }
@@ -308,31 +308,31 @@ entry:
   ret <2 x i64> %2
 }
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_offset_s64(i64* %base, <2 x i64> %offset) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_offset_s64(ptr %base, <2 x i64> %offset) {
 ; CHECK-LABEL: test_vldrdq_gather_offset_s64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrd.u64 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.v2i64.p0i64.v2i64(i64* %base, <2 x i64> %offset, i32 64, i32 0, i32 0)
+  %0 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.v2i64.p0.v2i64(ptr %base, <2 x i64> %offset, i32 64, i32 0, i32 0)
   ret <2 x i64> %0
 }
 
-declare <2 x i64> @llvm.arm.mve.vldr.gather.offset.v2i64.p0i64.v2i64(i64*, <2 x i64>, i32, i32, i32)
+declare <2 x i64> @llvm.arm.mve.vldr.gather.offset.v2i64.p0.v2i64(ptr, <2 x i64>, i32, i32, i32)
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_offset_u64(i64* %base, <2 x i64> %offset) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_offset_u64(ptr %base, <2 x i64> %offset) {
 ; CHECK-LABEL: test_vldrdq_gather_offset_u64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrd.u64 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.v2i64.p0i64.v2i64(i64* %base, <2 x i64> %offset, i32 64, i32 0, i32 1)
+  %0 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.v2i64.p0.v2i64(ptr %base, <2 x i64> %offset, i32 64, i32 0, i32 1)
   ret <2 x i64> %0
 }
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_offset_z_s64(i64* %base, <2 x i64> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_offset_z_s64(ptr %base, <2 x i64> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrdq_gather_offset_z_s64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -343,13 +343,13 @@ define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_offset_z_s64(i64* %base, <2
 entry:
   %0 = zext i16 %p to i32
   %1 = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %0)
-  %2 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v2i1(i64* %base, <2 x i64> %offset, i32 64, i32 0, i32 0, <2 x i1> %1)
+  %2 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.predicated.v2i64.p0.v2i64.v2i1(ptr %base, <2 x i64> %offset, i32 64, i32 0, i32 0, <2 x i1> %1)
   ret <2 x i64> %2
 }
 
-declare <2 x i64> @llvm.arm.mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v2i1(i64*, <2 x i64>, i32, i32, i32, <2 x i1>)
+declare <2 x i64> @llvm.arm.mve.vldr.gather.offset.predicated.v2i64.p0.v2i64.v2i1(ptr, <2 x i64>, i32, i32, i32, <2 x i1>)
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_offset_z_u64(i64* %base, <2 x i64> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_offset_z_u64(ptr %base, <2 x i64> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrdq_gather_offset_z_u64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -360,33 +360,33 @@ define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_offset_z_u64(i64* %base, <2
 entry:
   %0 = zext i16 %p to i32
   %1 = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %0)
-  %2 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v2i1(i64* %base, <2 x i64> %offset, i32 64, i32 0, i32 1, <2 x i1> %1)
+  %2 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.predicated.v2i64.p0.v2i64.v2i1(ptr %base, <2 x i64> %offset, i32 64, i32 0, i32 1, <2 x i1> %1)
   ret <2 x i64> %2
 }
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_shifted_offset_s64(i64* %base, <2 x i64> %offset) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_shifted_offset_s64(ptr %base, <2 x i64> %offset) {
 ; CHECK-LABEL: test_vldrdq_gather_shifted_offset_s64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrd.u64 q1, [r0, q0, uxtw #3]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.v2i64.p0i64.v2i64(i64* %base, <2 x i64> %offset, i32 64, i32 3, i32 0)
+  %0 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.v2i64.p0.v2i64(ptr %base, <2 x i64> %offset, i32 64, i32 3, i32 0)
   ret <2 x i64> %0
 }
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_shifted_offset_u64(i64* %base, <2 x i64> %offset) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_shifted_offset_u64(ptr %base, <2 x i64> %offset) {
 ; CHECK-LABEL: test_vldrdq_gather_shifted_offset_u64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrd.u64 q1, [r0, q0, uxtw #3]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.v2i64.p0i64.v2i64(i64* %base, <2 x i64> %offset, i32 64, i32 3, i32 1)
+  %0 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.v2i64.p0.v2i64(ptr %base, <2 x i64> %offset, i32 64, i32 3, i32 1)
   ret <2 x i64> %0
 }
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_shifted_offset_z_s64(i64* %base, <2 x i64> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_shifted_offset_z_s64(ptr %base, <2 x i64> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrdq_gather_shifted_offset_z_s64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -397,11 +397,11 @@ define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_shifted_offset_z_s64(i64* %
 entry:
   %0 = zext i16 %p to i32
   %1 = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %0)
-  %2 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v2i1(i64* %base, <2 x i64> %offset, i32 64, i32 3, i32 0, <2 x i1> %1)
+  %2 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.predicated.v2i64.p0.v2i64.v2i1(ptr %base, <2 x i64> %offset, i32 64, i32 3, i32 0, <2 x i1> %1)
   ret <2 x i64> %2
 }
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_shifted_offset_z_u64(i64* %base, <2 x i64> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_shifted_offset_z_u64(ptr %base, <2 x i64> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrdq_gather_shifted_offset_z_u64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -412,72 +412,70 @@ define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_shifted_offset_z_u64(i64* %
 entry:
   %0 = zext i16 %p to i32
   %1 = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %0)
-  %2 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v2i1(i64* %base, <2 x i64> %offset, i32 64, i32 3, i32 1, <2 x i1> %1)
+  %2 = call <2 x i64> @llvm.arm.mve.vldr.gather.offset.predicated.v2i64.p0.v2i64.v2i1(ptr %base, <2 x i64> %offset, i32 64, i32 3, i32 1, <2 x i1> %1)
   ret <2 x i64> %2
 }
 
-define arm_aapcs_vfpcc <8 x half> @test_vldrhq_gather_offset_f16(half* %base, <8 x i16> %offset) {
+define arm_aapcs_vfpcc <8 x half> @test_vldrhq_gather_offset_f16(ptr %base, <8 x i16> %offset) {
 ; CHECK-LABEL: test_vldrhq_gather_offset_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x half> @llvm.arm.mve.vldr.gather.offset.v8f16.p0f16.v8i16(half* %base, <8 x i16> %offset, i32 16, i32 0, i32 0)
+  %0 = call <8 x half> @llvm.arm.mve.vldr.gather.offset.v8f16.p0.v8i16(ptr %base, <8 x i16> %offset, i32 16, i32 0, i32 0)
   ret <8 x half> %0
 }
 
-declare <8 x half> @llvm.arm.mve.vldr.gather.offset.v8f16.p0f16.v8i16(half*, <8 x i16>, i32, i32, i32)
+declare <8 x half> @llvm.arm.mve.vldr.gather.offset.v8f16.p0.v8i16(ptr, <8 x i16>, i32, i32, i32)
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_offset_s16(i16* %base, <8 x i16> %offset) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_offset_s16(ptr %base, <8 x i16> %offset) {
 ; CHECK-LABEL: test_vldrhq_gather_offset_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0i16.v8i16(i16* %base, <8 x i16> %offset, i32 16, i32 0, i32 0)
+  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0.v8i16(ptr %base, <8 x i16> %offset, i32 16, i32 0, i32 0)
   ret <8 x i16> %0
 }
 
-declare <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0i16.v8i16(i16*, <8 x i16>, i32, i32, i32)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_offset_s32(i16* %base, <4 x i32> %offset) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_offset_s32(ptr %base, <4 x i32> %offset) {
 ; CHECK-LABEL: test_vldrhq_gather_offset_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i16.v4i32(i16* %base, <4 x i32> %offset, i32 16, i32 0, i32 0)
+  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr %base, <4 x i32> %offset, i32 16, i32 0, i32 0)
   ret <4 x i32> %0
 }
 
-declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i16.v4i32(i16*, <4 x i32>, i32, i32, i32)
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_offset_u16(i16* %base, <8 x i16> %offset) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_offset_u16(ptr %base, <8 x i16> %offset) {
 ; CHECK-LABEL: test_vldrhq_gather_offset_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0i16.v8i16(i16* %base, <8 x i16> %offset, i32 16, i32 0, i32 1)
+  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0.v8i16(ptr %base, <8 x i16> %offset, i32 16, i32 0, i32 1)
   ret <8 x i16> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_offset_u32(i16* %base, <4 x i32> %offset) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_offset_u32(ptr %base, <4 x i32> %offset) {
 ; CHECK-LABEL: test_vldrhq_gather_offset_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i16.v4i32(i16* %base, <4 x i32> %offset, i32 16, i32 0, i32 1)
+  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr %base, <4 x i32> %offset, i32 16, i32 0, i32 1)
   ret <4 x i32> %0
 }
 
-define arm_aapcs_vfpcc <8 x half> @test_vldrhq_gather_offset_z_f16(half* %base, <8 x i16> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x half> @test_vldrhq_gather_offset_z_f16(ptr %base, <8 x i16> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_gather_offset_z_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -488,13 +486,13 @@ define arm_aapcs_vfpcc <8 x half> @test_vldrhq_gather_offset_z_f16(half* %base,
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vldr.gather.offset.predicated.v8f16.p0f16.v8i16.v8i1(half* %base, <8 x i16> %offset, i32 16, i32 0, i32 0, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vldr.gather.offset.predicated.v8f16.p0.v8i16.v8i1(ptr %base, <8 x i16> %offset, i32 16, i32 0, i32 0, <8 x i1> %1)
   ret <8 x half> %2
 }
 
-declare <8 x half> @llvm.arm.mve.vldr.gather.offset.predicated.v8f16.p0f16.v8i16.v8i1(half*, <8 x i16>, i32, i32, i32, <8 x i1>)
+declare <8 x half> @llvm.arm.mve.vldr.gather.offset.predicated.v8f16.p0.v8i16.v8i1(ptr, <8 x i16>, i32, i32, i32, <8 x i1>)
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_offset_z_s16(i16* %base, <8 x i16> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_offset_z_s16(ptr %base, <8 x i16> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_gather_offset_z_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -505,13 +503,12 @@ define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_offset_z_s16(i16* %base, <8
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0i16.v8i16.v8i1(i16* %base, <8 x i16> %offset, i32 16, i32 0, i32 0, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0.v8i16.v8i1(ptr %base, <8 x i16> %offset, i32 16, i32 0, i32 0, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
-declare <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0i16.v8i16.v8i1(i16*, <8 x i16>, i32, i32, i32, <8 x i1>)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_offset_z_s32(i16* %base, <4 x i32> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_offset_z_s32(ptr %base, <4 x i32> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_gather_offset_z_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -522,13 +519,12 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_offset_z_s32(i16* %base, <4
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i16.v4i32.v4i1(i16* %base, <4 x i32> %offset, i32 16, i32 0, i32 0, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0.v4i32.v4i1(ptr %base, <4 x i32> %offset, i32 16, i32 0, i32 0, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
-declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i16.v4i32.v4i1(i16*, <4 x i32>, i32, i32, i32, <4 x i1>)
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_offset_z_u16(i16* %base, <8 x i16> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_offset_z_u16(ptr %base, <8 x i16> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_gather_offset_z_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -539,11 +535,11 @@ define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_offset_z_u16(i16* %base, <8
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0i16.v8i16.v8i1(i16* %base, <8 x i16> %offset, i32 16, i32 0, i32 1, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0.v8i16.v8i1(ptr %base, <8 x i16> %offset, i32 16, i32 0, i32 1, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_offset_z_u32(i16* %base, <4 x i32> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_offset_z_u32(ptr %base, <4 x i32> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_gather_offset_z_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -554,66 +550,66 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_offset_z_u32(i16* %base, <4
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i16.v4i32.v4i1(i16* %base, <4 x i32> %offset, i32 16, i32 0, i32 1, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0.v4i32.v4i1(ptr %base, <4 x i32> %offset, i32 16, i32 0, i32 1, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
-define arm_aapcs_vfpcc <8 x half> @test_vldrhq_gather_shifted_offset_f16(half* %base, <8 x i16> %offset) {
+define arm_aapcs_vfpcc <8 x half> @test_vldrhq_gather_shifted_offset_f16(ptr %base, <8 x i16> %offset) {
 ; CHECK-LABEL: test_vldrhq_gather_shifted_offset_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r0, q0, uxtw #1]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x half> @llvm.arm.mve.vldr.gather.offset.v8f16.p0f16.v8i16(half* %base, <8 x i16> %offset, i32 16, i32 1, i32 0)
+  %0 = call <8 x half> @llvm.arm.mve.vldr.gather.offset.v8f16.p0.v8i16(ptr %base, <8 x i16> %offset, i32 16, i32 1, i32 0)
   ret <8 x half> %0
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_shifted_offset_s16(i16* %base, <8 x i16> %offset) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_shifted_offset_s16(ptr %base, <8 x i16> %offset) {
 ; CHECK-LABEL: test_vldrhq_gather_shifted_offset_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r0, q0, uxtw #1]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0i16.v8i16(i16* %base, <8 x i16> %offset, i32 16, i32 1, i32 0)
+  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0.v8i16(ptr %base, <8 x i16> %offset, i32 16, i32 1, i32 0)
   ret <8 x i16> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_shifted_offset_s32(i16* %base, <4 x i32> %offset) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_shifted_offset_s32(ptr %base, <4 x i32> %offset) {
 ; CHECK-LABEL: test_vldrhq_gather_shifted_offset_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r0, q0, uxtw #1]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i16.v4i32(i16* %base, <4 x i32> %offset, i32 16, i32 1, i32 0)
+  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr %base, <4 x i32> %offset, i32 16, i32 1, i32 0)
   ret <4 x i32> %0
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_shifted_offset_u16(i16* %base, <8 x i16> %offset) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_shifted_offset_u16(ptr %base, <8 x i16> %offset) {
 ; CHECK-LABEL: test_vldrhq_gather_shifted_offset_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r0, q0, uxtw #1]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0i16.v8i16(i16* %base, <8 x i16> %offset, i32 16, i32 1, i32 1)
+  %0 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.v8i16.p0.v8i16(ptr %base, <8 x i16> %offset, i32 16, i32 1, i32 1)
   ret <8 x i16> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_shifted_offset_u32(i16* %base, <4 x i32> %offset) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_shifted_offset_u32(ptr %base, <4 x i32> %offset) {
 ; CHECK-LABEL: test_vldrhq_gather_shifted_offset_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r0, q0, uxtw #1]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i16.v4i32(i16* %base, <4 x i32> %offset, i32 16, i32 1, i32 1)
+  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr %base, <4 x i32> %offset, i32 16, i32 1, i32 1)
   ret <4 x i32> %0
 }
 
-define arm_aapcs_vfpcc <8 x half> @test_vldrhq_gather_shifted_offset_z_f16(half* %base, <8 x i16> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x half> @test_vldrhq_gather_shifted_offset_z_f16(ptr %base, <8 x i16> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_gather_shifted_offset_z_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -624,11 +620,11 @@ define arm_aapcs_vfpcc <8 x half> @test_vldrhq_gather_shifted_offset_z_f16(half*
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vldr.gather.offset.predicated.v8f16.p0f16.v8i16.v8i1(half* %base, <8 x i16> %offset, i32 16, i32 1, i32 0, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vldr.gather.offset.predicated.v8f16.p0.v8i16.v8i1(ptr %base, <8 x i16> %offset, i32 16, i32 1, i32 0, <8 x i1> %1)
   ret <8 x half> %2
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_shifted_offset_z_s16(i16* %base, <8 x i16> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_shifted_offset_z_s16(ptr %base, <8 x i16> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_gather_shifted_offset_z_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -639,11 +635,11 @@ define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_shifted_offset_z_s16(i16* %
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0i16.v8i16.v8i1(i16* %base, <8 x i16> %offset, i32 16, i32 1, i32 0, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0.v8i16.v8i1(ptr %base, <8 x i16> %offset, i32 16, i32 1, i32 0, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_shifted_offset_z_s32(i16* %base, <4 x i32> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_shifted_offset_z_s32(ptr %base, <4 x i32> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_gather_shifted_offset_z_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -654,11 +650,11 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_shifted_offset_z_s32(i16* %
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i16.v4i32.v4i1(i16* %base, <4 x i32> %offset, i32 16, i32 1, i32 0, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0.v4i32.v4i1(ptr %base, <4 x i32> %offset, i32 16, i32 1, i32 0, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_shifted_offset_z_u16(i16* %base, <8 x i16> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_shifted_offset_z_u16(ptr %base, <8 x i16> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_gather_shifted_offset_z_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -669,11 +665,11 @@ define arm_aapcs_vfpcc <8 x i16> @test_vldrhq_gather_shifted_offset_z_u16(i16* %
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0i16.v8i16.v8i1(i16* %base, <8 x i16> %offset, i32 16, i32 1, i32 1, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vldr.gather.offset.predicated.v8i16.p0.v8i16.v8i1(ptr %base, <8 x i16> %offset, i32 16, i32 1, i32 1, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_shifted_offset_z_u32(i16* %base, <4 x i32> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_shifted_offset_z_u32(ptr %base, <4 x i32> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrhq_gather_shifted_offset_z_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -684,7 +680,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrhq_gather_shifted_offset_z_u32(i16* %
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i16.v4i32.v4i1(i16* %base, <4 x i32> %offset, i32 16, i32 1, i32 1, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0.v4i32.v4i1(ptr %base, <4 x i32> %offset, i32 16, i32 1, i32 1, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -725,7 +721,7 @@ entry:
   ret <4 x i32> %0
 }
 
-define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_f32(<4 x i32>* %addr) {
+define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_f32(ptr %addr) {
 ; CHECK-LABEL: test_vldrwq_gather_base_wb_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -733,17 +729,17 @@ define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_f32(<4 x i32>* %a
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = call { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4f32.v4i32(<4 x i32> %0, i32 -64)
   %2 = extractvalue { <4 x float>, <4 x i32> } %1, 1
-  store <4 x i32> %2, <4 x i32>* %addr, align 8
+  store <4 x i32> %2, ptr %addr, align 8
   %3 = extractvalue { <4 x float>, <4 x i32> } %1, 0
   ret <4 x float> %3
 }
 
 declare { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4f32.v4i32(<4 x i32>, i32)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_s32(<4 x i32>* %addr) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_s32(ptr %addr) {
 ; CHECK-LABEL: test_vldrwq_gather_base_wb_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -751,17 +747,17 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_s32(<4 x i32>* %add
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32> %0, i32 80)
   %2 = extractvalue { <4 x i32>, <4 x i32> } %1, 1
-  store <4 x i32> %2, <4 x i32>* %addr, align 8
+  store <4 x i32> %2, ptr %addr, align 8
   %3 = extractvalue { <4 x i32>, <4 x i32> } %1, 0
   ret <4 x i32> %3
 }
 
 declare { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32>, i32)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_u32(<4 x i32>* %addr) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_u32(ptr %addr) {
 ; CHECK-LABEL: test_vldrwq_gather_base_wb_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -769,15 +765,15 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_u32(<4 x i32>* %add
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32> %0, i32 480)
   %2 = extractvalue { <4 x i32>, <4 x i32> } %1, 1
-  store <4 x i32> %2, <4 x i32>* %addr, align 8
+  store <4 x i32> %2, ptr %addr, align 8
   %3 = extractvalue { <4 x i32>, <4 x i32> } %1, 0
   ret <4 x i32> %3
 }
 
-define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_z_f32(<4 x i32>* %addr, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_z_f32(ptr %addr, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrwq_gather_base_wb_z_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -787,19 +783,19 @@ define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_z_f32(<4 x i32>*
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = zext i16 %p to i32
   %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
   %3 = call { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v4f32.v4i32.v4i1(<4 x i32> %0, i32 -352, <4 x i1> %2)
   %4 = extractvalue { <4 x float>, <4 x i32> } %3, 1
-  store <4 x i32> %4, <4 x i32>* %addr, align 8
+  store <4 x i32> %4, ptr %addr, align 8
   %5 = extractvalue { <4 x float>, <4 x i32> } %3, 0
   ret <4 x float> %5
 }
 
 declare { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v4f32.v4i32.v4i1(<4 x i32>, i32, <4 x i1>)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_z_s32(<4 x i32>* %addr, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_z_s32(ptr %addr, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrwq_gather_base_wb_z_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -809,19 +805,19 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_z_s32(<4 x i32>* %a
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = zext i16 %p to i32
   %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
   %3 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v4i32.v4i32.v4i1(<4 x i32> %0, i32 276, <4 x i1> %2)
   %4 = extractvalue { <4 x i32>, <4 x i32> } %3, 1
-  store <4 x i32> %4, <4 x i32>* %addr, align 8
+  store <4 x i32> %4, ptr %addr, align 8
   %5 = extractvalue { <4 x i32>, <4 x i32> } %3, 0
   ret <4 x i32> %5
 }
 
 declare { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v4i32.v4i32.v4i1(<4 x i32>, i32, <4 x i1>)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_z_u32(<4 x i32>* %addr, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_z_u32(ptr %addr, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrwq_gather_base_wb_z_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -831,12 +827,12 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_z_u32(<4 x i32>* %a
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = zext i16 %p to i32
   %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
   %3 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v4i32.v4i32.v4i1(<4 x i32> %0, i32 88, <4 x i1> %2)
   %4 = extractvalue { <4 x i32>, <4 x i32> } %3, 1
-  store <4 x i32> %4, <4 x i32>* %addr, align 8
+  store <4 x i32> %4, ptr %addr, align 8
   %5 = extractvalue { <4 x i32>, <4 x i32> } %3, 0
   ret <4 x i32> %5
 }
@@ -890,44 +886,43 @@ entry:
   ret <4 x i32> %2
 }
 
-define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_offset_f32(float* %base, <4 x i32> %offset) {
+define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_offset_f32(ptr %base, <4 x i32> %offset) {
 ; CHECK-LABEL: test_vldrwq_gather_offset_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x float> @llvm.arm.mve.vldr.gather.offset.v4f32.p0f32.v4i32(float* %base, <4 x i32> %offset, i32 32, i32 0, i32 0)
+  %0 = call <4 x float> @llvm.arm.mve.vldr.gather.offset.v4f32.p0.v4i32(ptr %base, <4 x i32> %offset, i32 32, i32 0, i32 0)
   ret <4 x float> %0
 }
 
-declare <4 x float> @llvm.arm.mve.vldr.gather.offset.v4f32.p0f32.v4i32(float*, <4 x i32>, i32, i32, i32)
+declare <4 x float> @llvm.arm.mve.vldr.gather.offset.v4f32.p0.v4i32(ptr, <4 x i32>, i32, i32, i32)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_offset_s32(i32* %base, <4 x i32> %offset) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_offset_s32(ptr %base, <4 x i32> %offset) {
 ; CHECK-LABEL: test_vldrwq_gather_offset_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* %base, <4 x i32> %offset, i32 32, i32 0, i32 0)
+  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr %base, <4 x i32> %offset, i32 32, i32 0, i32 0)
   ret <4 x i32> %0
 }
 
-declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32*, <4 x i32>, i32, i32, i32)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_offset_u32(i32* %base, <4 x i32> %offset) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_offset_u32(ptr %base, <4 x i32> %offset) {
 ; CHECK-LABEL: test_vldrwq_gather_offset_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, q0]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* %base, <4 x i32> %offset, i32 32, i32 0, i32 1)
+  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr %base, <4 x i32> %offset, i32 32, i32 0, i32 1)
   ret <4 x i32> %0
 }
 
-define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_offset_z_f32(float* %base, <4 x i32> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_offset_z_f32(ptr %base, <4 x i32> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrwq_gather_offset_z_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -938,13 +933,13 @@ define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_offset_z_f32(float* %base
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vldr.gather.offset.predicated.v4f32.p0f32.v4i32.v4i1(float* %base, <4 x i32> %offset, i32 32, i32 0, i32 0, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vldr.gather.offset.predicated.v4f32.p0.v4i32.v4i1(ptr %base, <4 x i32> %offset, i32 32, i32 0, i32 0, <4 x i1> %1)
   ret <4 x float> %2
 }
 
-declare <4 x float> @llvm.arm.mve.vldr.gather.offset.predicated.v4f32.p0f32.v4i32.v4i1(float*, <4 x i32>, i32, i32, i32, <4 x i1>)
+declare <4 x float> @llvm.arm.mve.vldr.gather.offset.predicated.v4f32.p0.v4i32.v4i1(ptr, <4 x i32>, i32, i32, i32, <4 x i1>)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_offset_z_s32(i32* %base, <4 x i32> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_offset_z_s32(ptr %base, <4 x i32> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrwq_gather_offset_z_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -955,13 +950,12 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_offset_z_s32(i32* %base, <4
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i32.v4i32.v4i1(i32* %base, <4 x i32> %offset, i32 32, i32 0, i32 0, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0.v4i32.v4i1(ptr %base, <4 x i32> %offset, i32 32, i32 0, i32 0, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
-declare <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i32.v4i32.v4i1(i32*, <4 x i32>, i32, i32, i32, <4 x i1>)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_offset_z_u32(i32* %base, <4 x i32> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_offset_z_u32(ptr %base, <4 x i32> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrwq_gather_offset_z_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -972,44 +966,44 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_offset_z_u32(i32* %base, <4
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i32.v4i32.v4i1(i32* %base, <4 x i32> %offset, i32 32, i32 0, i32 1, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0.v4i32.v4i1(ptr %base, <4 x i32> %offset, i32 32, i32 0, i32 1, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
-define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_shifted_offset_f32(float* %base, <4 x i32> %offset) {
+define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_shifted_offset_f32(ptr %base, <4 x i32> %offset) {
 ; CHECK-LABEL: test_vldrwq_gather_shifted_offset_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, q0, uxtw #2]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x float> @llvm.arm.mve.vldr.gather.offset.v4f32.p0f32.v4i32(float* %base, <4 x i32> %offset, i32 32, i32 2, i32 0)
+  %0 = call <4 x float> @llvm.arm.mve.vldr.gather.offset.v4f32.p0.v4i32(ptr %base, <4 x i32> %offset, i32 32, i32 2, i32 0)
   ret <4 x float> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_shifted_offset_s32(i32* %base, <4 x i32> %offset) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_shifted_offset_s32(ptr %base, <4 x i32> %offset) {
 ; CHECK-LABEL: test_vldrwq_gather_shifted_offset_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, q0, uxtw #2]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* %base, <4 x i32> %offset, i32 32, i32 2, i32 0)
+  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr %base, <4 x i32> %offset, i32 32, i32 2, i32 0)
   ret <4 x i32> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_shifted_offset_u32(i32* %base, <4 x i32> %offset) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_shifted_offset_u32(ptr %base, <4 x i32> %offset) {
 ; CHECK-LABEL: test_vldrwq_gather_shifted_offset_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, q0, uxtw #2]
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0i32.v4i32(i32* %base, <4 x i32> %offset, i32 32, i32 2, i32 1)
+  %0 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr %base, <4 x i32> %offset, i32 32, i32 2, i32 1)
   ret <4 x i32> %0
 }
 
-define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_shifted_offset_z_f32(float* %base, <4 x i32> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_shifted_offset_z_f32(ptr %base, <4 x i32> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrwq_gather_shifted_offset_z_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1020,11 +1014,11 @@ define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_shifted_offset_z_f32(floa
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vldr.gather.offset.predicated.v4f32.p0f32.v4i32.v4i1(float* %base, <4 x i32> %offset, i32 32, i32 2, i32 0, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vldr.gather.offset.predicated.v4f32.p0.v4i32.v4i1(ptr %base, <4 x i32> %offset, i32 32, i32 2, i32 0, <4 x i1> %1)
   ret <4 x float> %2
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_shifted_offset_z_s32(i32* %base, <4 x i32> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_shifted_offset_z_s32(ptr %base, <4 x i32> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrwq_gather_shifted_offset_z_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1035,11 +1029,11 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_shifted_offset_z_s32(i32* %
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i32.v4i32.v4i1(i32* %base, <4 x i32> %offset, i32 32, i32 2, i32 0, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0.v4i32.v4i1(ptr %base, <4 x i32> %offset, i32 32, i32 2, i32 0, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_shifted_offset_z_u32(i32* %base, <4 x i32> %offset, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_shifted_offset_z_u32(ptr %base, <4 x i32> %offset, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrwq_gather_shifted_offset_z_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1050,11 +1044,11 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_shifted_offset_z_u32(i32* %
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0i32.v4i32.v4i1(i32* %base, <4 x i32> %offset, i32 32, i32 2, i32 1, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.predicated.v4i32.p0.v4i32.v4i1(ptr %base, <4 x i32> %offset, i32 32, i32 2, i32 1, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_s16(i8* %base, <8 x i16> %offset, <8 x i16> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_s16(ptr %base, <8 x i16> %offset, <8 x i16> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrbq_scatter_offset_p_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1064,13 +1058,13 @@ define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_s16(i8* %base, <8 x i1
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i8.v8i16.v8i16.v8i1(i8* %base, <8 x i16> %offset, <8 x i16> %value, i32 8, i32 0, <8 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v8i16.v8i16.v8i1(ptr %base, <8 x i16> %offset, <8 x i16> %value, i32 8, i32 0, <8 x i1> %1)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i8.v8i16.v8i16.v8i1(i8*, <8 x i16>, <8 x i16>, i32, i32, <8 x i1>)
+declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v8i16.v8i16.v8i1(ptr, <8 x i16>, <8 x i16>, i32, i32, <8 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_s32(i8* %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_s32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrbq_scatter_offset_p_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1080,13 +1074,13 @@ define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_s32(i8* %base, <4 x i3
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i8.v4i32.v4i32.v4i1(i8* %base, <4 x i32> %offset, <4 x i32> %value, i32 8, i32 0, <4 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4i32.v4i1(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 8, i32 0, <4 x i1> %1)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i8.v4i32.v4i32.v4i1(i8*, <4 x i32>, <4 x i32>, i32, i32, <4 x i1>)
+declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4i32.v4i1(ptr, <4 x i32>, <4 x i32>, i32, i32, <4 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_s8(i8* %base, <16 x i8> %offset, <16 x i8> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_s8(ptr %base, <16 x i8> %offset, <16 x i8> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrbq_scatter_offset_p_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1096,13 +1090,13 @@ define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_s8(i8* %base, <16 x i8
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i8.v16i8.v16i8.v16i1(i8* %base, <16 x i8> %offset, <16 x i8> %value, i32 8, i32 0, <16 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v16i8.v16i8.v16i1(ptr %base, <16 x i8> %offset, <16 x i8> %value, i32 8, i32 0, <16 x i1> %1)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i8.v16i8.v16i8.v16i1(i8*, <16 x i8>, <16 x i8>, i32, i32, <16 x i1>)
+declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v16i8.v16i8.v16i1(ptr, <16 x i8>, <16 x i8>, i32, i32, <16 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_u16(i8* %base, <8 x i16> %offset, <8 x i16> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_u16(ptr %base, <8 x i16> %offset, <8 x i16> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrbq_scatter_offset_p_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1112,11 +1106,11 @@ define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_u16(i8* %base, <8 x i1
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i8.v8i16.v8i16.v8i1(i8* %base, <8 x i16> %offset, <8 x i16> %value, i32 8, i32 0, <8 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v8i16.v8i16.v8i1(ptr %base, <8 x i16> %offset, <8 x i16> %value, i32 8, i32 0, <8 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_u32(i8* %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_u32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrbq_scatter_offset_p_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1126,11 +1120,11 @@ define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_u32(i8* %base, <4 x i3
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i8.v4i32.v4i32.v4i1(i8* %base, <4 x i32> %offset, <4 x i32> %value, i32 8, i32 0, <4 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4i32.v4i1(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 8, i32 0, <4 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_u8(i8* %base, <16 x i8> %offset, <16 x i8> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_u8(ptr %base, <16 x i8> %offset, <16 x i8> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrbq_scatter_offset_p_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1140,73 +1134,73 @@ define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_p_u8(i8* %base, <16 x i8
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i8.v16i8.v16i8.v16i1(i8* %base, <16 x i8> %offset, <16 x i8> %value, i32 8, i32 0, <16 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v16i8.v16i8.v16i1(ptr %base, <16 x i8> %offset, <16 x i8> %value, i32 8, i32 0, <16 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_s16(i8* %base, <8 x i16> %offset, <8 x i16> %value) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_s16(ptr %base, <8 x i16> %offset, <8 x i16> %value) {
 ; CHECK-LABEL: test_vstrbq_scatter_offset_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.16 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i8.v8i16.v8i16(i8* %base, <8 x i16> %offset, <8 x i16> %value, i32 8, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v8i16.v8i16(ptr %base, <8 x i16> %offset, <8 x i16> %value, i32 8, i32 0)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.p0i8.v8i16.v8i16(i8*, <8 x i16>, <8 x i16>, i32, i32)
+declare void @llvm.arm.mve.vstr.scatter.offset.p0.v8i16.v8i16(ptr, <8 x i16>, <8 x i16>, i32, i32)
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_s32(i8* %base, <4 x i32> %offset, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_s32(ptr %base, <4 x i32> %offset, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrbq_scatter_offset_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.32 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i8.v4i32.v4i32(i8* %base, <4 x i32> %offset, <4 x i32> %value, i32 8, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4i32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 8, i32 0)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.p0i8.v4i32.v4i32(i8*, <4 x i32>, <4 x i32>, i32, i32)
+declare void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4i32(ptr, <4 x i32>, <4 x i32>, i32, i32)
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_s8(i8* %base, <16 x i8> %offset, <16 x i8> %value) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_s8(ptr %base, <16 x i8> %offset, <16 x i8> %value) {
 ; CHECK-LABEL: test_vstrbq_scatter_offset_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.8 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i8.v16i8.v16i8(i8* %base, <16 x i8> %offset, <16 x i8> %value, i32 8, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v16i8.v16i8(ptr %base, <16 x i8> %offset, <16 x i8> %value, i32 8, i32 0)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.p0i8.v16i8.v16i8(i8*, <16 x i8>, <16 x i8>, i32, i32)
+declare void @llvm.arm.mve.vstr.scatter.offset.p0.v16i8.v16i8(ptr, <16 x i8>, <16 x i8>, i32, i32)
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_u16(i8* %base, <8 x i16> %offset, <8 x i16> %value) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_u16(ptr %base, <8 x i16> %offset, <8 x i16> %value) {
 ; CHECK-LABEL: test_vstrbq_scatter_offset_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.16 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i8.v8i16.v8i16(i8* %base, <8 x i16> %offset, <8 x i16> %value, i32 8, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v8i16.v8i16(ptr %base, <8 x i16> %offset, <8 x i16> %value, i32 8, i32 0)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_u32(i8* %base, <4 x i32> %offset, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_u32(ptr %base, <4 x i32> %offset, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrbq_scatter_offset_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.32 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i8.v4i32.v4i32(i8* %base, <4 x i32> %offset, <4 x i32> %value, i32 8, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4i32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 8, i32 0)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_u8(i8* %base, <16 x i8> %offset, <16 x i8> %value) {
+define arm_aapcs_vfpcc void @test_vstrbq_scatter_offset_u8(ptr %base, <16 x i8> %offset, <16 x i8> %value) {
 ; CHECK-LABEL: test_vstrbq_scatter_offset_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrb.8 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i8.v16i8.v16i8(i8* %base, <16 x i8> %offset, <16 x i8> %value, i32 8, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v16i8.v16i8(ptr %base, <16 x i8> %offset, <16 x i8> %value, i32 8, i32 0)
   ret void
 }
 
@@ -1262,7 +1256,7 @@ entry:
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_p_s64(<2 x i64>* %addr, <2 x i64> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_p_s64(ptr %addr, <2 x i64> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrdq_scatter_base_wb_p_s64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -1272,17 +1266,17 @@ define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_p_s64(<2 x i64>* %addr,
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
+  %0 = load <2 x i64>, ptr %addr, align 8
   %1 = zext i16 %p to i32
   %2 = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %1)
   %3 = call <2 x i64> @llvm.arm.mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v2i1(<2 x i64> %0, i32 248, <2 x i64> %value, <2 x i1> %2)
-  store <2 x i64> %3, <2 x i64>* %addr, align 8
+  store <2 x i64> %3, ptr %addr, align 8
   ret void
 }
 
 declare <2 x i64> @llvm.arm.mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v2i1(<2 x i64>, i32, <2 x i64>, <2 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_p_u64(<2 x i64>* %addr, <2 x i64> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_p_u64(ptr %addr, <2 x i64> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrdq_scatter_base_wb_p_u64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -1292,15 +1286,15 @@ define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_p_u64(<2 x i64>* %addr,
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
+  %0 = load <2 x i64>, ptr %addr, align 8
   %1 = zext i16 %p to i32
   %2 = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %1)
   %3 = call <2 x i64> @llvm.arm.mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v2i1(<2 x i64> %0, i32 136, <2 x i64> %value, <2 x i1> %2)
-  store <2 x i64> %3, <2 x i64>* %addr, align 8
+  store <2 x i64> %3, ptr %addr, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_s64(<2 x i64>* %addr, <2 x i64> %value) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_s64(ptr %addr, <2 x i64> %value) {
 ; CHECK-LABEL: test_vstrdq_scatter_base_wb_s64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -1308,15 +1302,15 @@ define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_s64(<2 x i64>* %addr, <
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
+  %0 = load <2 x i64>, ptr %addr, align 8
   %1 = call <2 x i64> @llvm.arm.mve.vstr.scatter.base.wb.v2i64.v2i64(<2 x i64> %0, i32 208, <2 x i64> %value)
-  store <2 x i64> %1, <2 x i64>* %addr, align 8
+  store <2 x i64> %1, ptr %addr, align 8
   ret void
 }
 
 declare <2 x i64> @llvm.arm.mve.vstr.scatter.base.wb.v2i64.v2i64(<2 x i64>, i32, <2 x i64>)
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_u64(<2 x i64>* %addr, <2 x i64> %value) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_u64(ptr %addr, <2 x i64> %value) {
 ; CHECK-LABEL: test_vstrdq_scatter_base_wb_u64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -1324,13 +1318,13 @@ define arm_aapcs_vfpcc void @test_vstrdq_scatter_base_wb_u64(<2 x i64>* %addr, <
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
+  %0 = load <2 x i64>, ptr %addr, align 8
   %1 = call <2 x i64> @llvm.arm.mve.vstr.scatter.base.wb.v2i64.v2i64(<2 x i64> %0, i32 -168, <2 x i64> %value)
-  store <2 x i64> %1, <2 x i64>* %addr, align 8
+  store <2 x i64> %1, ptr %addr, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_offset_p_s64(i64* %base, <2 x i64> %offset, <2 x i64> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_offset_p_s64(ptr %base, <2 x i64> %offset, <2 x i64> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrdq_scatter_offset_p_s64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1340,13 +1334,13 @@ define arm_aapcs_vfpcc void @test_vstrdq_scatter_offset_p_s64(i64* %base, <2 x i
 entry:
   %0 = zext i16 %p to i32
   %1 = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v2i1(i64* %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 0, <2 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v2i64.v2i64.v2i1(ptr %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 0, <2 x i1> %1)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v2i1(i64*, <2 x i64>, <2 x i64>, i32, i32, <2 x i1>)
+declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v2i64.v2i64.v2i1(ptr, <2 x i64>, <2 x i64>, i32, i32, <2 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_offset_p_u64(i64* %base, <2 x i64> %offset, <2 x i64> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_offset_p_u64(ptr %base, <2 x i64> %offset, <2 x i64> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrdq_scatter_offset_p_u64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1356,33 +1350,33 @@ define arm_aapcs_vfpcc void @test_vstrdq_scatter_offset_p_u64(i64* %base, <2 x i
 entry:
   %0 = zext i16 %p to i32
   %1 = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v2i1(i64* %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 0, <2 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v2i64.v2i64.v2i1(ptr %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 0, <2 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_offset_s64(i64* %base, <2 x i64> %offset, <2 x i64> %value) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_offset_s64(ptr %base, <2 x i64> %offset, <2 x i64> %value) {
 ; CHECK-LABEL: test_vstrdq_scatter_offset_s64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrd.64 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i64.v2i64.v2i64(i64* %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v2i64.v2i64(ptr %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 0)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.p0i64.v2i64.v2i64(i64*, <2 x i64>, <2 x i64>, i32, i32)
+declare void @llvm.arm.mve.vstr.scatter.offset.p0.v2i64.v2i64(ptr, <2 x i64>, <2 x i64>, i32, i32)
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_offset_u64(i64* %base, <2 x i64> %offset, <2 x i64> %value) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_offset_u64(ptr %base, <2 x i64> %offset, <2 x i64> %value) {
 ; CHECK-LABEL: test_vstrdq_scatter_offset_u64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrd.64 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i64.v2i64.v2i64(i64* %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v2i64.v2i64(ptr %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 0)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_shifted_offset_p_s64(i64* %base, <2 x i64> %offset, <2 x i64> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_shifted_offset_p_s64(ptr %base, <2 x i64> %offset, <2 x i64> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrdq_scatter_shifted_offset_p_s64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1392,11 +1386,11 @@ define arm_aapcs_vfpcc void @test_vstrdq_scatter_shifted_offset_p_s64(i64* %base
 entry:
   %0 = zext i16 %p to i32
   %1 = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v2i1(i64* %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 3, <2 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v2i64.v2i64.v2i1(ptr %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 3, <2 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_shifted_offset_p_u64(i64* %base, <2 x i64> %offset, <2 x i64> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_shifted_offset_p_u64(ptr %base, <2 x i64> %offset, <2 x i64> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrdq_scatter_shifted_offset_p_u64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1406,43 +1400,43 @@ define arm_aapcs_vfpcc void @test_vstrdq_scatter_shifted_offset_p_u64(i64* %base
 entry:
   %0 = zext i16 %p to i32
   %1 = call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v2i1(i64* %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 3, <2 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v2i64.v2i64.v2i1(ptr %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 3, <2 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_shifted_offset_s64(i64* %base, <2 x i64> %offset, <2 x i64> %value) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_shifted_offset_s64(ptr %base, <2 x i64> %offset, <2 x i64> %value) {
 ; CHECK-LABEL: test_vstrdq_scatter_shifted_offset_s64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrd.64 q1, [r0, q0, uxtw #3]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i64.v2i64.v2i64(i64* %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 3)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v2i64.v2i64(ptr %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 3)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrdq_scatter_shifted_offset_u64(i64* %base, <2 x i64> %offset, <2 x i64> %value) {
+define arm_aapcs_vfpcc void @test_vstrdq_scatter_shifted_offset_u64(ptr %base, <2 x i64> %offset, <2 x i64> %value) {
 ; CHECK-LABEL: test_vstrdq_scatter_shifted_offset_u64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrd.64 q1, [r0, q0, uxtw #3]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i64.v2i64.v2i64(i64* %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 3)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v2i64.v2i64(ptr %base, <2 x i64> %offset, <2 x i64> %value, i32 64, i32 3)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_f16(half* %base, <8 x i16> %offset, <8 x half> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_f16(ptr %base, <8 x i16> %offset, <8 x half> %value) {
 ; CHECK-LABEL: test_vstrhq_scatter_offset_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.16 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0f16.v8i16.v8f16(half* %base, <8 x i16> %offset, <8 x half> %value, i32 16, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v8i16.v8f16(ptr %base, <8 x i16> %offset, <8 x half> %value, i32 16, i32 0)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.p0f16.v8i16.v8f16(half*, <8 x i16>, <8 x half>, i32, i32)
+declare void @llvm.arm.mve.vstr.scatter.offset.p0.v8i16.v8f16(ptr, <8 x i16>, <8 x half>, i32, i32)
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_f16(half* %base, <8 x i16> %offset, <8 x half> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_f16(ptr %base, <8 x i16> %offset, <8 x half> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_scatter_offset_p_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1452,13 +1446,13 @@ define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_f16(half* %base, <8 x
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0f16.v8i16.v8f16.v8i1(half* %base, <8 x i16> %offset, <8 x half> %value, i32 16, i32 0, <8 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v8i16.v8f16.v8i1(ptr %base, <8 x i16> %offset, <8 x half> %value, i32 16, i32 0, <8 x i1> %1)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0f16.v8i16.v8f16.v8i1(half*, <8 x i16>, <8 x half>, i32, i32, <8 x i1>)
+declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v8i16.v8f16.v8i1(ptr, <8 x i16>, <8 x half>, i32, i32, <8 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_s16(i16* %base, <8 x i16> %offset, <8 x i16> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_s16(ptr %base, <8 x i16> %offset, <8 x i16> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_scatter_offset_p_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1468,13 +1462,12 @@ define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_s16(i16* %base, <8 x i
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i16.v8i16.v8i16.v8i1(i16* %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 0, <8 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v8i16.v8i16.v8i1(ptr %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 0, <8 x i1> %1)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i16.v8i16.v8i16.v8i1(i16*, <8 x i16>, <8 x i16>, i32, i32, <8 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_s32(i16* %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_s32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_scatter_offset_p_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1484,13 +1477,12 @@ define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_s32(i16* %base, <4 x i
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i16.v4i32.v4i32.v4i1(i16* %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 0, <4 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4i32.v4i1(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 0, <4 x i1> %1)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i16.v4i32.v4i32.v4i1(i16*, <4 x i32>, <4 x i32>, i32, i32, <4 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_u16(i16* %base, <8 x i16> %offset, <8 x i16> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_u16(ptr %base, <8 x i16> %offset, <8 x i16> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_scatter_offset_p_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1500,11 +1492,11 @@ define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_u16(i16* %base, <8 x i
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i16.v8i16.v8i16.v8i1(i16* %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 0, <8 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v8i16.v8i16.v8i1(ptr %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 0, <8 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_u32(i16* %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_u32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_scatter_offset_p_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1514,65 +1506,63 @@ define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_p_u32(i16* %base, <4 x i
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i16.v4i32.v4i32.v4i1(i16* %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 0, <4 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4i32.v4i1(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 0, <4 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_s16(i16* %base, <8 x i16> %offset, <8 x i16> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_s16(ptr %base, <8 x i16> %offset, <8 x i16> %value) {
 ; CHECK-LABEL: test_vstrhq_scatter_offset_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.16 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i16.v8i16.v8i16(i16* %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v8i16.v8i16(ptr %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 0)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.p0i16.v8i16.v8i16(i16*, <8 x i16>, <8 x i16>, i32, i32)
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_s32(i16* %base, <4 x i32> %offset, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_s32(ptr %base, <4 x i32> %offset, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrhq_scatter_offset_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.32 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i16.v4i32.v4i32(i16* %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4i32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 0)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.p0i16.v4i32.v4i32(i16*, <4 x i32>, <4 x i32>, i32, i32)
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_u16(i16* %base, <8 x i16> %offset, <8 x i16> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_u16(ptr %base, <8 x i16> %offset, <8 x i16> %value) {
 ; CHECK-LABEL: test_vstrhq_scatter_offset_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.16 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i16.v8i16.v8i16(i16* %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v8i16.v8i16(ptr %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 0)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_u32(i16* %base, <4 x i32> %offset, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_offset_u32(ptr %base, <4 x i32> %offset, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrhq_scatter_offset_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.32 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i16.v4i32.v4i32(i16* %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4i32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 0)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_f16(half* %base, <8 x i16> %offset, <8 x half> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_f16(ptr %base, <8 x i16> %offset, <8 x half> %value) {
 ; CHECK-LABEL: test_vstrhq_scatter_shifted_offset_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.16 q1, [r0, q0, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0f16.v8i16.v8f16(half* %base, <8 x i16> %offset, <8 x half> %value, i32 16, i32 1)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v8i16.v8f16(ptr %base, <8 x i16> %offset, <8 x half> %value, i32 16, i32 1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_f16(half* %base, <8 x i16> %offset, <8 x half> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_f16(ptr %base, <8 x i16> %offset, <8 x half> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_scatter_shifted_offset_p_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1582,11 +1572,11 @@ define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_f16(half* %bas
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0f16.v8i16.v8f16.v8i1(half* %base, <8 x i16> %offset, <8 x half> %value, i32 16, i32 1, <8 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v8i16.v8f16.v8i1(ptr %base, <8 x i16> %offset, <8 x half> %value, i32 16, i32 1, <8 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_s16(i16* %base, <8 x i16> %offset, <8 x i16> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_s16(ptr %base, <8 x i16> %offset, <8 x i16> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_scatter_shifted_offset_p_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1596,11 +1586,11 @@ define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_s16(i16* %base
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i16.v8i16.v8i16.v8i1(i16* %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 1, <8 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v8i16.v8i16.v8i1(ptr %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 1, <8 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_s32(i16* %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_s32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_scatter_shifted_offset_p_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1610,11 +1600,11 @@ define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_s32(i16* %base
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i16.v4i32.v4i32.v4i1(i16* %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 1, <4 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4i32.v4i1(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 1, <4 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_u16(i16* %base, <8 x i16> %offset, <8 x i16> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_u16(ptr %base, <8 x i16> %offset, <8 x i16> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_scatter_shifted_offset_p_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1624,11 +1614,11 @@ define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_u16(i16* %base
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i16.v8i16.v8i16.v8i1(i16* %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 1, <8 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v8i16.v8i16.v8i1(ptr %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 1, <8 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_u32(i16* %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_u32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrhq_scatter_shifted_offset_p_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1638,47 +1628,47 @@ define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_p_u32(i16* %base
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i16.v4i32.v4i32.v4i1(i16* %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 1, <4 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4i32.v4i1(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 1, <4 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_s16(i16* %base, <8 x i16> %offset, <8 x i16> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_s16(ptr %base, <8 x i16> %offset, <8 x i16> %value) {
 ; CHECK-LABEL: test_vstrhq_scatter_shifted_offset_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.16 q1, [r0, q0, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i16.v8i16.v8i16(i16* %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 1)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v8i16.v8i16(ptr %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_s32(i16* %base, <4 x i32> %offset, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_s32(ptr %base, <4 x i32> %offset, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrhq_scatter_shifted_offset_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.32 q1, [r0, q0, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i16.v4i32.v4i32(i16* %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 1)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4i32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_u16(i16* %base, <8 x i16> %offset, <8 x i16> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_u16(ptr %base, <8 x i16> %offset, <8 x i16> %value) {
 ; CHECK-LABEL: test_vstrhq_scatter_shifted_offset_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.16 q1, [r0, q0, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i16.v8i16.v8i16(i16* %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 1)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v8i16.v8i16(ptr %base, <8 x i16> %offset, <8 x i16> %value, i32 16, i32 1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_u32(i16* %base, <4 x i32> %offset, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrhq_scatter_shifted_offset_u32(ptr %base, <4 x i32> %offset, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrhq_scatter_shifted_offset_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrh.32 q1, [r0, q0, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i16.v4i32.v4i32(i16* %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 1)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4i32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 16, i32 1)
   ret void
 }
 
@@ -1762,7 +1752,7 @@ entry:
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_f32(<4 x i32>* %addr, <4 x float> %value) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_f32(ptr %addr, <4 x float> %value) {
 ; CHECK-LABEL: test_vstrwq_scatter_base_wb_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -1770,15 +1760,15 @@ define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_f32(<4 x i32>* %addr, <
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = call <4 x i32> @llvm.arm.mve.vstr.scatter.base.wb.v4i32.v4f32(<4 x i32> %0, i32 -412, <4 x float> %value)
-  store <4 x i32> %1, <4 x i32>* %addr, align 8
+  store <4 x i32> %1, ptr %addr, align 8
   ret void
 }
 
 declare <4 x i32> @llvm.arm.mve.vstr.scatter.base.wb.v4i32.v4f32(<4 x i32>, i32, <4 x float>)
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_p_f32(<4 x i32>* %addr, <4 x float> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_p_f32(ptr %addr, <4 x float> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrwq_scatter_base_wb_p_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -1788,17 +1778,17 @@ define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_p_f32(<4 x i32>* %addr,
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = zext i16 %p to i32
   %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
   %3 = call <4 x i32> @llvm.arm.mve.vstr.scatter.base.wb.predicated.v4i32.v4f32.v4i1(<4 x i32> %0, i32 236, <4 x float> %value, <4 x i1> %2)
-  store <4 x i32> %3, <4 x i32>* %addr, align 8
+  store <4 x i32> %3, ptr %addr, align 8
   ret void
 }
 
 declare <4 x i32> @llvm.arm.mve.vstr.scatter.base.wb.predicated.v4i32.v4f32.v4i1(<4 x i32>, i32, <4 x float>, <4 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_p_s32(<4 x i32>* %addr, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_p_s32(ptr %addr, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrwq_scatter_base_wb_p_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -1808,17 +1798,17 @@ define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_p_s32(<4 x i32>* %addr,
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = zext i16 %p to i32
   %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
   %3 = call <4 x i32> @llvm.arm.mve.vstr.scatter.base.wb.predicated.v4i32.v4i32.v4i1(<4 x i32> %0, i32 328, <4 x i32> %value, <4 x i1> %2)
-  store <4 x i32> %3, <4 x i32>* %addr, align 8
+  store <4 x i32> %3, ptr %addr, align 8
   ret void
 }
 
 declare <4 x i32> @llvm.arm.mve.vstr.scatter.base.wb.predicated.v4i32.v4i32.v4i1(<4 x i32>, i32, <4 x i32>, <4 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_p_u32(<4 x i32>* %addr, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_p_u32(ptr %addr, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrwq_scatter_base_wb_p_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -1828,15 +1818,15 @@ define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_p_u32(<4 x i32>* %addr,
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = zext i16 %p to i32
   %2 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
   %3 = call <4 x i32> @llvm.arm.mve.vstr.scatter.base.wb.predicated.v4i32.v4i32.v4i1(<4 x i32> %0, i32 412, <4 x i32> %value, <4 x i1> %2)
-  store <4 x i32> %3, <4 x i32>* %addr, align 8
+  store <4 x i32> %3, ptr %addr, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_s32(<4 x i32>* %addr, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_s32(ptr %addr, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrwq_scatter_base_wb_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -1844,15 +1834,15 @@ define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_s32(<4 x i32>* %addr, <
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = call <4 x i32> @llvm.arm.mve.vstr.scatter.base.wb.v4i32.v4i32(<4 x i32> %0, i32 -152, <4 x i32> %value)
-  store <4 x i32> %1, <4 x i32>* %addr, align 8
+  store <4 x i32> %1, ptr %addr, align 8
   ret void
 }
 
 declare <4 x i32> @llvm.arm.mve.vstr.scatter.base.wb.v4i32.v4i32(<4 x i32>, i32, <4 x i32>)
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_u32(<4 x i32>* %addr, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_u32(ptr %addr, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrwq_scatter_base_wb_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -1860,25 +1850,25 @@ define arm_aapcs_vfpcc void @test_vstrwq_scatter_base_wb_u32(<4 x i32>* %addr, <
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = call <4 x i32> @llvm.arm.mve.vstr.scatter.base.wb.v4i32.v4i32(<4 x i32> %0, i32 64, <4 x i32> %value)
-  store <4 x i32> %1, <4 x i32>* %addr, align 8
+  store <4 x i32> %1, ptr %addr, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_f32(float* %base, <4 x i32> %offset, <4 x float> %value) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_f32(ptr %base, <4 x i32> %offset, <4 x float> %value) {
 ; CHECK-LABEL: test_vstrwq_scatter_offset_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrw.32 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0f32.v4i32.v4f32(float* %base, <4 x i32> %offset, <4 x float> %value, i32 32, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4f32(ptr %base, <4 x i32> %offset, <4 x float> %value, i32 32, i32 0)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.p0f32.v4i32.v4f32(float*, <4 x i32>, <4 x float>, i32, i32)
+declare void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4f32(ptr, <4 x i32>, <4 x float>, i32, i32)
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_p_f32(float* %base, <4 x i32> %offset, <4 x float> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_p_f32(ptr %base, <4 x i32> %offset, <4 x float> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrwq_scatter_offset_p_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1888,13 +1878,13 @@ define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_p_f32(float* %base, <4 x
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0f32.v4i32.v4f32.v4i1(float* %base, <4 x i32> %offset, <4 x float> %value, i32 32, i32 0, <4 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4f32.v4i1(ptr %base, <4 x i32> %offset, <4 x float> %value, i32 32, i32 0, <4 x i1> %1)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0f32.v4i32.v4f32.v4i1(float*, <4 x i32>, <4 x float>, i32, i32, <4 x i1>)
+declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4f32.v4i1(ptr, <4 x i32>, <4 x float>, i32, i32, <4 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_p_s32(i32* %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_p_s32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrwq_scatter_offset_p_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1904,13 +1894,12 @@ define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_p_s32(i32* %base, <4 x i
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i32.v4i32.v4i32.v4i1(i32* %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 0, <4 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4i32.v4i1(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 0, <4 x i1> %1)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i32.v4i32.v4i32.v4i1(i32*, <4 x i32>, <4 x i32>, i32, i32, <4 x i1>)
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_p_u32(i32* %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_p_u32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrwq_scatter_offset_p_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1920,43 +1909,42 @@ define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_p_u32(i32* %base, <4 x i
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i32.v4i32.v4i32.v4i1(i32* %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 0, <4 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4i32.v4i1(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 0, <4 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_s32(i32* %base, <4 x i32> %offset, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_s32(ptr %base, <4 x i32> %offset, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrwq_scatter_offset_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrw.32 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i32.v4i32.v4i32(i32* %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4i32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 0)
   ret void
 }
 
-declare void @llvm.arm.mve.vstr.scatter.offset.p0i32.v4i32.v4i32(i32*, <4 x i32>, <4 x i32>, i32, i32)
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_u32(i32* %base, <4 x i32> %offset, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_offset_u32(ptr %base, <4 x i32> %offset, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrwq_scatter_offset_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrw.32 q1, [r0, q0]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i32.v4i32.v4i32(i32* %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 0)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4i32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 0)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_f32(float* %base, <4 x i32> %offset, <4 x float> %value) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_f32(ptr %base, <4 x i32> %offset, <4 x float> %value) {
 ; CHECK-LABEL: test_vstrwq_scatter_shifted_offset_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrw.32 q1, [r0, q0, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0f32.v4i32.v4f32(float* %base, <4 x i32> %offset, <4 x float> %value, i32 32, i32 2)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4f32(ptr %base, <4 x i32> %offset, <4 x float> %value, i32 32, i32 2)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_p_f32(float* %base, <4 x i32> %offset, <4 x float> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_p_f32(ptr %base, <4 x i32> %offset, <4 x float> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrwq_scatter_shifted_offset_p_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1966,11 +1954,11 @@ define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_p_f32(float* %ba
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0f32.v4i32.v4f32.v4i1(float* %base, <4 x i32> %offset, <4 x float> %value, i32 32, i32 2, <4 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4f32.v4i1(ptr %base, <4 x i32> %offset, <4 x float> %value, i32 32, i32 2, <4 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_p_s32(i32* %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_p_s32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrwq_scatter_shifted_offset_p_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1980,11 +1968,11 @@ define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_p_s32(i32* %base
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i32.v4i32.v4i32.v4i1(i32* %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 2, <4 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4i32.v4i1(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 2, <4 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_p_u32(i32* %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_p_u32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i16 zeroext %p) {
 ; CHECK-LABEL: test_vstrwq_scatter_shifted_offset_p_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -1994,26 +1982,26 @@ define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_p_u32(i32* %base
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0i32.v4i32.v4i32.v4i1(i32* %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 2, <4 x i1> %1)
+  call void @llvm.arm.mve.vstr.scatter.offset.predicated.p0.v4i32.v4i32.v4i1(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 2, <4 x i1> %1)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_s32(i32* %base, <4 x i32> %offset, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_s32(ptr %base, <4 x i32> %offset, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrwq_scatter_shifted_offset_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrw.32 q1, [r0, q0, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i32.v4i32.v4i32(i32* %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 2)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4i32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 2)
   ret void
 }
 
-define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_u32(i32* %base, <4 x i32> %offset, <4 x i32> %value) {
+define arm_aapcs_vfpcc void @test_vstrwq_scatter_shifted_offset_u32(ptr %base, <4 x i32> %offset, <4 x i32> %value) {
 ; CHECK-LABEL: test_vstrwq_scatter_shifted_offset_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vstrw.32 q1, [r0, q0, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  call void @llvm.arm.mve.vstr.scatter.offset.p0i32.v4i32.v4i32(i32* %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 2)
+  call void @llvm.arm.mve.vstr.scatter.offset.p0.v4i32.v4i32(ptr %base, <4 x i32> %offset, <4 x i32> %value, i32 32, i32 2)
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vadc.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vadc.ll
index 7ec1d9153f49f..967e8a94900e1 100644
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vadc.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vadc.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s
 
-define arm_aapcs_vfpcc <4 x i32> @test_vadciq_s32(<4 x i32> %a, <4 x i32> %b, i32* %carry_out) {
+define arm_aapcs_vfpcc <4 x i32> @test_vadciq_s32(<4 x i32> %a, <4 x i32> %b, ptr %carry_out) {
 ; CHECK-LABEL: test_vadciq_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vadci.i32 q0, q0, q1
@@ -14,14 +14,14 @@ entry:
   %1 = extractvalue { <4 x i32>, i32 } %0, 1
   %2 = lshr i32 %1, 29
   %3 = and i32 %2, 1
-  store i32 %3, i32* %carry_out, align 4
+  store i32 %3, ptr %carry_out, align 4
   %4 = extractvalue { <4 x i32>, i32 } %0, 0
   ret <4 x i32> %4
 }
 
 declare { <4 x i32>, i32 } @llvm.arm.mve.vadc.v4i32(<4 x i32>, <4 x i32>, i32)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vadcq_u32(<4 x i32> %a, <4 x i32> %b, i32* %carry) {
+define arm_aapcs_vfpcc <4 x i32> @test_vadcq_u32(<4 x i32> %a, <4 x i32> %b, ptr %carry) {
 ; CHECK-LABEL: test_vadcq_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r1, [r0]
@@ -33,18 +33,18 @@ define arm_aapcs_vfpcc <4 x i32> @test_vadcq_u32(<4 x i32> %a, <4 x i32> %b, i32
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %carry, align 4
+  %0 = load i32, ptr %carry, align 4
   %1 = shl i32 %0, 29
   %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vadc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 %1)
   %3 = extractvalue { <4 x i32>, i32 } %2, 1
   %4 = lshr i32 %3, 29
   %5 = and i32 %4, 1
-  store i32 %5, i32* %carry, align 4
+  store i32 %5, ptr %carry, align 4
   %6 = extractvalue { <4 x i32>, i32 } %2, 0
   ret <4 x i32> %6
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vadciq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* %carry_out, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vadciq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, ptr %carry_out, i16 zeroext %p) {
 ; CHECK-LABEL: test_vadciq_m_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -61,7 +61,7 @@ entry:
   %3 = extractvalue { <4 x i32>, i32 } %2, 1
   %4 = lshr i32 %3, 29
   %5 = and i32 %4, 1
-  store i32 %5, i32* %carry_out, align 4
+  store i32 %5, ptr %carry_out, align 4
   %6 = extractvalue { <4 x i32>, i32 } %2, 0
   ret <4 x i32> %6
 }
@@ -70,7 +70,7 @@ declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
 
 declare { <4 x i32>, i32 } @llvm.arm.mve.vadc.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i32>, i32, <4 x i1>)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vadcq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* %carry, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vadcq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, ptr %carry, i16 zeroext %p) {
 ; CHECK-LABEL: test_vadcq_m_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -84,7 +84,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vadcq_m_s32(<4 x i32> %inactive, <4 x i32
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %carry, align 4
+  %0 = load i32, ptr %carry, align 4
   %1 = shl i32 %0, 29
   %2 = zext i16 %p to i32
   %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2)
@@ -92,14 +92,14 @@ entry:
   %5 = extractvalue { <4 x i32>, i32 } %4, 1
   %6 = lshr i32 %5, 29
   %7 = and i32 %6, 1
-  store i32 %7, i32* %carry, align 4
+  store i32 %7, ptr %carry, align 4
   %8 = extractvalue { <4 x i32>, i32 } %4, 0
   ret <4 x i32> %8
 }
 
 declare { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32>, <4 x i32>, i32)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_s32(<4 x i32> %a, <4 x i32> %b, i32* nocapture %carry_out) {
+define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_s32(<4 x i32> %a, <4 x i32> %b, ptr nocapture %carry_out) {
 ; CHECK-LABEL: test_vsbciq_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vsbci.i32 q0, q0, q1
@@ -112,12 +112,12 @@ entry:
   %1 = extractvalue { <4 x i32>, i32 } %0, 1
   %2 = lshr i32 %1, 29
   %3 = and i32 %2, 1
-  store i32 %3, i32* %carry_out, align 4
+  store i32 %3, ptr %carry_out, align 4
   %4 = extractvalue { <4 x i32>, i32 } %0, 0
   ret <4 x i32> %4
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_u32(<4 x i32> %a, <4 x i32> %b, i32* nocapture %carry_out) {
+define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_u32(<4 x i32> %a, <4 x i32> %b, ptr nocapture %carry_out) {
 ; CHECK-LABEL: test_vsbciq_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vsbci.i32 q0, q0, q1
@@ -130,12 +130,12 @@ entry:
   %1 = extractvalue { <4 x i32>, i32 } %0, 1
   %2 = lshr i32 %1, 29
   %3 = and i32 %2, 1
-  store i32 %3, i32* %carry_out, align 4
+  store i32 %3, ptr %carry_out, align 4
   %4 = extractvalue { <4 x i32>, i32 } %0, 0
   ret <4 x i32> %4
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_s32(<4 x i32> %a, <4 x i32> %b, i32* nocapture %carry) {
+define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_s32(<4 x i32> %a, <4 x i32> %b, ptr nocapture %carry) {
 ; CHECK-LABEL: test_vsbcq_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r1, [r0]
@@ -147,18 +147,18 @@ define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_s32(<4 x i32> %a, <4 x i32> %b, i32
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %carry, align 4
+  %0 = load i32, ptr %carry, align 4
   %1 = shl i32 %0, 29
   %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 %1)
   %3 = extractvalue { <4 x i32>, i32 } %2, 1
   %4 = lshr i32 %3, 29
   %5 = and i32 %4, 1
-  store i32 %5, i32* %carry, align 4
+  store i32 %5, ptr %carry, align 4
   %6 = extractvalue { <4 x i32>, i32 } %2, 0
   ret <4 x i32> %6
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_u32(<4 x i32> %a, <4 x i32> %b, i32* nocapture %carry) {
+define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_u32(<4 x i32> %a, <4 x i32> %b, ptr nocapture %carry) {
 ; CHECK-LABEL: test_vsbcq_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r1, [r0]
@@ -170,20 +170,20 @@ define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_u32(<4 x i32> %a, <4 x i32> %b, i32
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %carry, align 4
+  %0 = load i32, ptr %carry, align 4
   %1 = shl i32 %0, 29
   %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 %1)
   %3 = extractvalue { <4 x i32>, i32 } %2, 1
   %4 = lshr i32 %3, 29
   %5 = and i32 %4, 1
-  store i32 %5, i32* %carry, align 4
+  store i32 %5, ptr %carry, align 4
   %6 = extractvalue { <4 x i32>, i32 } %2, 0
   ret <4 x i32> %6
 }
 
 declare { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i32>, i32, <4 x i1>)
 
-define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* nocapture %carry_out, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, ptr nocapture %carry_out, i16 zeroext %p) {
 ; CHECK-LABEL: test_vsbciq_m_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -200,12 +200,12 @@ entry:
   %3 = extractvalue { <4 x i32>, i32 } %2, 1
   %4 = lshr i32 %3, 29
   %5 = and i32 %4, 1
-  store i32 %5, i32* %carry_out, align 4
+  store i32 %5, ptr %carry_out, align 4
   %6 = extractvalue { <4 x i32>, i32 } %2, 0
   ret <4 x i32> %6
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* nocapture %carry_out, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, ptr nocapture %carry_out, i16 zeroext %p) {
 ; CHECK-LABEL: test_vsbciq_m_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -222,12 +222,12 @@ entry:
   %3 = extractvalue { <4 x i32>, i32 } %2, 1
   %4 = lshr i32 %3, 29
   %5 = and i32 %4, 1
-  store i32 %5, i32* %carry_out, align 4
+  store i32 %5, ptr %carry_out, align 4
   %6 = extractvalue { <4 x i32>, i32 } %2, 0
   ret <4 x i32> %6
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* nocapture %carry, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, ptr nocapture %carry, i16 zeroext %p) {
 ; CHECK-LABEL: test_vsbcq_m_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -241,7 +241,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_m_s32(<4 x i32> %inactive, <4 x i32
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %carry, align 4
+  %0 = load i32, ptr %carry, align 4
   %1 = shl i32 %0, 29
   %2 = zext i16 %p to i32
   %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2)
@@ -249,12 +249,12 @@ entry:
   %5 = extractvalue { <4 x i32>, i32 } %4, 1
   %6 = lshr i32 %5, 29
   %7 = and i32 %6, 1
-  store i32 %7, i32* %carry, align 4
+  store i32 %7, ptr %carry, align 4
   %8 = extractvalue { <4 x i32>, i32 } %4, 0
   ret <4 x i32> %8
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* nocapture %carry, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, ptr nocapture %carry, i16 zeroext %p) {
 ; CHECK-LABEL: test_vsbcq_m_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -268,7 +268,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_m_u32(<4 x i32> %inactive, <4 x i32
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %carry, align 4
+  %0 = load i32, ptr %carry, align 4
   %1 = shl i32 %0, 29
   %2 = zext i16 %p to i32
   %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2)
@@ -276,7 +276,7 @@ entry:
   %5 = extractvalue { <4 x i32>, i32 } %4, 1
   %6 = lshr i32 %5, 29
   %7 = and i32 %6, 1
-  store i32 %7, i32* %carry, align 4
+  store i32 %7, ptr %carry, align 4
   %8 = extractvalue { <4 x i32>, i32 } %4, 0
   ret <4 x i32> %8
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vld24.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vld24.ll
index 917cec927a993..6e8b713b1922c 100644
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vld24.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vld24.ll
@@ -6,14 +6,14 @@
 %struct.uint32x4x2_t = type { [2 x <4 x i32>] }
 %struct.int8x16x4_t = type { [4 x <16 x i8>] }
 
-define arm_aapcs_vfpcc %struct.float16x8x2_t @test_vld2q_f16(half* %addr) {
+define arm_aapcs_vfpcc %struct.float16x8x2_t @test_vld2q_f16(ptr %addr) {
 ; CHECK-LABEL: test_vld2q_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.16 {q0, q1}, [r0]
 ; CHECK-NEXT:    vld21.16 {q0, q1}, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0f16(half* %addr)
+  %0 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr %addr)
   %1 = extractvalue { <8 x half>, <8 x half> } %0, 0
   %2 = insertvalue %struct.float16x8x2_t undef, <8 x half> %1, 0, 0
   %3 = extractvalue { <8 x half>, <8 x half> } %0, 1
@@ -21,7 +21,7 @@ entry:
   ret %struct.float16x8x2_t %4
 }
 
-define arm_aapcs_vfpcc half *@test_vld2q_f16_post(half* %addr, <8 x half>* %dst) {
+define arm_aapcs_vfpcc ptr @test_vld2q_f16_post(ptr %addr, ptr %dst) {
 ; CHECK-LABEL: test_vld2q_f16_post:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.16 {q0, q1}, [r0]
@@ -29,16 +29,16 @@ define arm_aapcs_vfpcc half *@test_vld2q_f16_post(half* %addr, <8 x half>* %dst)
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0f16(half* %addr)
+  %0 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr %addr)
   %1 = extractvalue { <8 x half>, <8 x half> } %0, 0
-  store <8 x half> %1, <8 x half> *%dst, align 4
-  %2 = getelementptr half, half *%addr, i32 16
-  ret half *%2
+  store <8 x half> %1, ptr %dst, align 4
+  %2 = getelementptr half, ptr %addr, i32 16
+  ret ptr %2
 }
 
-declare { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0f16(half*)
+declare { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr)
 
-define arm_aapcs_vfpcc %struct.uint8x16x4_t @test_vld4q_u8(i8* %addr) {
+define arm_aapcs_vfpcc %struct.uint8x16x4_t @test_vld4q_u8(ptr %addr) {
 ; CHECK-LABEL: test_vld4q_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld40.8 {q0, q1, q2, q3}, [r0]
@@ -47,7 +47,7 @@ define arm_aapcs_vfpcc %struct.uint8x16x4_t @test_vld4q_u8(i8* %addr) {
 ; CHECK-NEXT:    vld43.8 {q0, q1, q2, q3}, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.mve.vld4q.v16i8.p0i8(i8* %addr)
+  %0 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.mve.vld4q.v16i8.p0(ptr %addr)
   %1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %0, 0
   %2 = insertvalue %struct.uint8x16x4_t undef, <16 x i8> %1, 0, 0
   %3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %0, 1
@@ -59,7 +59,7 @@ entry:
   ret %struct.uint8x16x4_t %8
 }
 
-define arm_aapcs_vfpcc i8* @test_vld4q_u8_post(i8* %addr, <16 x i8>* %dst) {
+define arm_aapcs_vfpcc ptr @test_vld4q_u8_post(ptr %addr, ptr %dst) {
 ; CHECK-LABEL: test_vld4q_u8_post:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld40.8 {q0, q1, q2, q3}, [r0]
@@ -69,16 +69,16 @@ define arm_aapcs_vfpcc i8* @test_vld4q_u8_post(i8* %addr, <16 x i8>* %dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.mve.vld4q.v16i8.p0i8(i8* %addr)
+  %0 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.mve.vld4q.v16i8.p0(ptr %addr)
   %1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %0, 0
-  store <16 x i8> %1, <16 x i8> *%dst, align 4
-  %2 = getelementptr i8, i8 *%addr, i32 64
-  ret i8* %2
+  store <16 x i8> %1, ptr %dst, align 4
+  %2 = getelementptr i8, ptr %addr, i32 64
+  ret ptr %2
 }
 
-declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.mve.vld4q.v16i8.p0i8(i8*)
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.mve.vld4q.v16i8.p0(ptr)
 
-define arm_aapcs_vfpcc void @test_vst2q_u32(i32* %addr, %struct.uint32x4x2_t %value.coerce) {
+define arm_aapcs_vfpcc void @test_vst2q_u32(ptr %addr, %struct.uint32x4x2_t %value.coerce) {
 ; CHECK-LABEL: test_vst2q_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vst20.32 {q0, q1}, [r0]
@@ -87,12 +87,12 @@ define arm_aapcs_vfpcc void @test_vst2q_u32(i32* %addr, %struct.uint32x4x2_t %va
 entry:
   %value.coerce.fca.0.0.extract = extractvalue %struct.uint32x4x2_t %value.coerce, 0, 0
   %value.coerce.fca.0.1.extract = extractvalue %struct.uint32x4x2_t %value.coerce, 0, 1
-  tail call void @llvm.arm.mve.vst2q.p0i32.v4i32(i32* %addr, <4 x i32> %value.coerce.fca.0.0.extract, <4 x i32> %value.coerce.fca.0.1.extract, i32 0)
-  tail call void @llvm.arm.mve.vst2q.p0i32.v4i32(i32* %addr, <4 x i32> %value.coerce.fca.0.0.extract, <4 x i32> %value.coerce.fca.0.1.extract, i32 1)
+  tail call void @llvm.arm.mve.vst2q.p0.v4i32(ptr %addr, <4 x i32> %value.coerce.fca.0.0.extract, <4 x i32> %value.coerce.fca.0.1.extract, i32 0)
+  tail call void @llvm.arm.mve.vst2q.p0.v4i32(ptr %addr, <4 x i32> %value.coerce.fca.0.0.extract, <4 x i32> %value.coerce.fca.0.1.extract, i32 1)
   ret void
 }
 
-define arm_aapcs_vfpcc i32* @test_vst2q_u32_post(i32* %addr, %struct.uint32x4x2_t %value.coerce) {
+define arm_aapcs_vfpcc ptr @test_vst2q_u32_post(ptr %addr, %struct.uint32x4x2_t %value.coerce) {
 ; CHECK-LABEL: test_vst2q_u32_post:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vst20.32 {q0, q1}, [r0]
@@ -101,15 +101,15 @@ define arm_aapcs_vfpcc i32* @test_vst2q_u32_post(i32* %addr, %struct.uint32x4x2_
 entry:
   %value.coerce.fca.0.0.extract = extractvalue %struct.uint32x4x2_t %value.coerce, 0, 0
   %value.coerce.fca.0.1.extract = extractvalue %struct.uint32x4x2_t %value.coerce, 0, 1
-  tail call void @llvm.arm.mve.vst2q.p0i32.v4i32(i32* %addr, <4 x i32> %value.coerce.fca.0.0.extract, <4 x i32> %value.coerce.fca.0.1.extract, i32 0)
-  tail call void @llvm.arm.mve.vst2q.p0i32.v4i32(i32* %addr, <4 x i32> %value.coerce.fca.0.0.extract, <4 x i32> %value.coerce.fca.0.1.extract, i32 1)
-  %g = getelementptr i32, i32 *%addr, i32 8
-  ret i32* %g
+  tail call void @llvm.arm.mve.vst2q.p0.v4i32(ptr %addr, <4 x i32> %value.coerce.fca.0.0.extract, <4 x i32> %value.coerce.fca.0.1.extract, i32 0)
+  tail call void @llvm.arm.mve.vst2q.p0.v4i32(ptr %addr, <4 x i32> %value.coerce.fca.0.0.extract, <4 x i32> %value.coerce.fca.0.1.extract, i32 1)
+  %g = getelementptr i32, ptr %addr, i32 8
+  ret ptr %g
 }
 
-declare void @llvm.arm.mve.vst2q.p0i32.v4i32(i32*, <4 x i32>, <4 x i32>, i32)
+declare void @llvm.arm.mve.vst2q.p0.v4i32(ptr, <4 x i32>, <4 x i32>, i32)
 
-define arm_aapcs_vfpcc void @test_vst2q_f16(half* %addr, %struct.float16x8x2_t %value.coerce) {
+define arm_aapcs_vfpcc void @test_vst2q_f16(ptr %addr, %struct.float16x8x2_t %value.coerce) {
 ; CHECK-LABEL: test_vst2q_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vst20.16 {q0, q1}, [r0]
@@ -118,12 +118,12 @@ define arm_aapcs_vfpcc void @test_vst2q_f16(half* %addr, %struct.float16x8x2_t %
 entry:
   %value.coerce.fca.0.0.extract = extractvalue %struct.float16x8x2_t %value.coerce, 0, 0
   %value.coerce.fca.0.1.extract = extractvalue %struct.float16x8x2_t %value.coerce, 0, 1
-  call void @llvm.arm.mve.vst2q.p0f16.v8f16(half* %addr, <8 x half> %value.coerce.fca.0.0.extract, <8 x half> %value.coerce.fca.0.1.extract, i32 0)
-  call void @llvm.arm.mve.vst2q.p0f16.v8f16(half* %addr, <8 x half> %value.coerce.fca.0.0.extract, <8 x half> %value.coerce.fca.0.1.extract, i32 1)
+  call void @llvm.arm.mve.vst2q.p0.v8f16(ptr %addr, <8 x half> %value.coerce.fca.0.0.extract, <8 x half> %value.coerce.fca.0.1.extract, i32 0)
+  call void @llvm.arm.mve.vst2q.p0.v8f16(ptr %addr, <8 x half> %value.coerce.fca.0.0.extract, <8 x half> %value.coerce.fca.0.1.extract, i32 1)
   ret void
 }
 
-define arm_aapcs_vfpcc half* @test_vst2q_f16_post(half* %addr, %struct.float16x8x2_t %value.coerce) {
+define arm_aapcs_vfpcc ptr @test_vst2q_f16_post(ptr %addr, %struct.float16x8x2_t %value.coerce) {
 ; CHECK-LABEL: test_vst2q_f16_post:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vst20.16 {q0, q1}, [r0]
@@ -132,15 +132,15 @@ define arm_aapcs_vfpcc half* @test_vst2q_f16_post(half* %addr, %struct.float16x8
 entry:
   %value.coerce.fca.0.0.extract = extractvalue %struct.float16x8x2_t %value.coerce, 0, 0
   %value.coerce.fca.0.1.extract = extractvalue %struct.float16x8x2_t %value.coerce, 0, 1
-  call void @llvm.arm.mve.vst2q.p0f16.v8f16(half* %addr, <8 x half> %value.coerce.fca.0.0.extract, <8 x half> %value.coerce.fca.0.1.extract, i32 0)
-  call void @llvm.arm.mve.vst2q.p0f16.v8f16(half* %addr, <8 x half> %value.coerce.fca.0.0.extract, <8 x half> %value.coerce.fca.0.1.extract, i32 1)
-  %g = getelementptr half, half *%addr, i32 16
-  ret half* %g
+  call void @llvm.arm.mve.vst2q.p0.v8f16(ptr %addr, <8 x half> %value.coerce.fca.0.0.extract, <8 x half> %value.coerce.fca.0.1.extract, i32 0)
+  call void @llvm.arm.mve.vst2q.p0.v8f16(ptr %addr, <8 x half> %value.coerce.fca.0.0.extract, <8 x half> %value.coerce.fca.0.1.extract, i32 1)
+  %g = getelementptr half, ptr %addr, i32 16
+  ret ptr %g
 }
 
-declare void @llvm.arm.mve.vst2q.p0f16.v8f16(half*, <8 x half>, <8 x half>, i32)
+declare void @llvm.arm.mve.vst2q.p0.v8f16(ptr, <8 x half>, <8 x half>, i32)
 
-define arm_aapcs_vfpcc void @test_vst4q_s8(i8* %addr, %struct.int8x16x4_t %value.coerce) {
+define arm_aapcs_vfpcc void @test_vst4q_s8(ptr %addr, %struct.int8x16x4_t %value.coerce) {
 ; CHECK-LABEL: test_vst4q_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vst40.8 {q0, q1, q2, q3}, [r0]
@@ -153,14 +153,14 @@ entry:
   %value.coerce.fca.0.1.extract = extractvalue %struct.int8x16x4_t %value.coerce, 0, 1
   %value.coerce.fca.0.2.extract = extractvalue %struct.int8x16x4_t %value.coerce, 0, 2
   %value.coerce.fca.0.3.extract = extractvalue %struct.int8x16x4_t %value.coerce, 0, 3
-  tail call void @llvm.arm.mve.vst4q.p0i8.v16i8(i8* %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 0)
-  tail call void @llvm.arm.mve.vst4q.p0i8.v16i8(i8* %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 1)
-  tail call void @llvm.arm.mve.vst4q.p0i8.v16i8(i8* %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 2)
-  tail call void @llvm.arm.mve.vst4q.p0i8.v16i8(i8* %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 3)
+  tail call void @llvm.arm.mve.vst4q.p0.v16i8(ptr %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 0)
+  tail call void @llvm.arm.mve.vst4q.p0.v16i8(ptr %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 1)
+  tail call void @llvm.arm.mve.vst4q.p0.v16i8(ptr %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 2)
+  tail call void @llvm.arm.mve.vst4q.p0.v16i8(ptr %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 3)
   ret void
 }
 
-define arm_aapcs_vfpcc i8* @test_vst4q_s8_post(i8* %addr, %struct.int8x16x4_t %value.coerce) {
+define arm_aapcs_vfpcc ptr @test_vst4q_s8_post(ptr %addr, %struct.int8x16x4_t %value.coerce) {
 ; CHECK-LABEL: test_vst4q_s8_post:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vst40.8 {q0, q1, q2, q3}, [r0]
@@ -173,12 +173,12 @@ entry:
   %value.coerce.fca.0.1.extract = extractvalue %struct.int8x16x4_t %value.coerce, 0, 1
   %value.coerce.fca.0.2.extract = extractvalue %struct.int8x16x4_t %value.coerce, 0, 2
   %value.coerce.fca.0.3.extract = extractvalue %struct.int8x16x4_t %value.coerce, 0, 3
-  tail call void @llvm.arm.mve.vst4q.p0i8.v16i8(i8* %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 0)
-  tail call void @llvm.arm.mve.vst4q.p0i8.v16i8(i8* %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 1)
-  tail call void @llvm.arm.mve.vst4q.p0i8.v16i8(i8* %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 2)
-  tail call void @llvm.arm.mve.vst4q.p0i8.v16i8(i8* %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 3)
-  %g = getelementptr i8, i8 *%addr, i32 64
-  ret i8* %g
+  tail call void @llvm.arm.mve.vst4q.p0.v16i8(ptr %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 0)
+  tail call void @llvm.arm.mve.vst4q.p0.v16i8(ptr %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 1)
+  tail call void @llvm.arm.mve.vst4q.p0.v16i8(ptr %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 2)
+  tail call void @llvm.arm.mve.vst4q.p0.v16i8(ptr %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 3)
+  %g = getelementptr i8, ptr %addr, i32 64
+  ret ptr %g
 }
 
-declare void @llvm.arm.mve.vst4q.p0i8.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32)
+declare void @llvm.arm.mve.vst4q.p0.v16i8(ptr, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32)

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vldr.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vldr.ll
index 515011f1a1a77..c5f546fb7ad55 100644
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vldr.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vldr.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s
 
-define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_s32(<4 x i32>* %addr) {
+define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_s32(ptr %addr) {
 ; CHECK-LABEL: test_vldrwq_gather_base_wb_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -9,17 +9,17 @@ define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_s32(<4 x i32>* %add
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = tail call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32> %0, i32 80)
   %2 = extractvalue { <4 x i32>, <4 x i32> } %1, 1
-  store <4 x i32> %2, <4 x i32>* %addr, align 8
+  store <4 x i32> %2, ptr %addr, align 8
   %3 = extractvalue { <4 x i32>, <4 x i32> } %1, 0
   ret <4 x i32> %3
 }
 
 declare { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32>, i32)
 
-define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_f32(<4 x i32>* %addr) {
+define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_f32(ptr %addr) {
 ; CHECK-LABEL: test_vldrwq_gather_base_wb_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -27,17 +27,17 @@ define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_f32(<4 x i32>* %a
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
+  %0 = load <4 x i32>, ptr %addr, align 8
   %1 = tail call { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4f32.v4i32(<4 x i32> %0, i32 64)
   %2 = extractvalue { <4 x float>, <4 x i32> } %1, 1
-  store <4 x i32> %2, <4 x i32>* %addr, align 8
+  store <4 x i32> %2, ptr %addr, align 8
   %3 = extractvalue { <4 x float>, <4 x i32> } %1, 0
   ret <4 x float> %3
 }
 
 declare { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4f32.v4i32(<4 x i32>, i32)
 
-define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_z_u64(<2 x i64>* %addr, i16 zeroext %p) {
+define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_z_u64(ptr %addr, i16 zeroext %p) {
 ; CHECK-LABEL: test_vldrdq_gather_base_wb_z_u64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -47,12 +47,12 @@ define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_z_u64(<2 x i64>* %a
 ; CHECK-NEXT:    vstrw.32 q1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
+  %0 = load <2 x i64>, ptr %addr, align 8
   %1 = zext i16 %p to i32
   %2 = tail call <2 x i1> @llvm.arm.mve.pred.i2v.v2i1(i32 %1)
   %3 = tail call { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v2i1(<2 x i64> %0, i32 656, <2 x i1> %2)
   %4 = extractvalue { <2 x i64>, <2 x i64> } %3, 1
-  store <2 x i64> %4, <2 x i64>* %addr, align 8
+  store <2 x i64> %4, ptr %addr, align 8
   %5 = extractvalue { <2 x i64>, <2 x i64> } %3, 0
   ret <2 x i64> %5
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vshlc.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vshlc.ll
index cc5fd36bf066a..7f34aac4e56c2 100644
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vshlc.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vshlc.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -verify-machineinstrs -o - %s | FileCheck %s
 
-define arm_aapcs_vfpcc <16 x i8> @test_vshlcq_s8(<16 x i8> %a, i32* nocapture %b) {
+define arm_aapcs_vfpcc <16 x i8> @test_vshlcq_s8(<16 x i8> %a, ptr nocapture %b) {
 ; CHECK-LABEL: test_vshlcq_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r1, [r0]
@@ -9,15 +9,15 @@ define arm_aapcs_vfpcc <16 x i8> @test_vshlcq_s8(<16 x i8> %a, i32* nocapture %b
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   %1 = tail call { i32, <16 x i8> } @llvm.arm.mve.vshlc.v16i8(<16 x i8> %a, i32 %0, i32 18)
   %2 = extractvalue { i32, <16 x i8> } %1, 0
-  store i32 %2, i32* %b, align 4
+  store i32 %2, ptr %b, align 4
   %3 = extractvalue { i32, <16 x i8> } %1, 1
   ret <16 x i8> %3
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vshlcq_s16(<8 x i16> %a, i32* nocapture %b) {
+define arm_aapcs_vfpcc <8 x i16> @test_vshlcq_s16(<8 x i16> %a, ptr nocapture %b) {
 ; CHECK-LABEL: test_vshlcq_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r1, [r0]
@@ -25,15 +25,15 @@ define arm_aapcs_vfpcc <8 x i16> @test_vshlcq_s16(<8 x i16> %a, i32* nocapture %
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   %1 = tail call { i32, <8 x i16> } @llvm.arm.mve.vshlc.v8i16(<8 x i16> %a, i32 %0, i32 16)
   %2 = extractvalue { i32, <8 x i16> } %1, 0
-  store i32 %2, i32* %b, align 4
+  store i32 %2, ptr %b, align 4
   %3 = extractvalue { i32, <8 x i16> } %1, 1
   ret <8 x i16> %3
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vshlcq_s32(<4 x i32> %a, i32* nocapture %b) {
+define arm_aapcs_vfpcc <4 x i32> @test_vshlcq_s32(<4 x i32> %a, ptr nocapture %b) {
 ; CHECK-LABEL: test_vshlcq_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r1, [r0]
@@ -41,15 +41,15 @@ define arm_aapcs_vfpcc <4 x i32> @test_vshlcq_s32(<4 x i32> %a, i32* nocapture %
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   %1 = tail call { i32, <4 x i32> } @llvm.arm.mve.vshlc.v4i32(<4 x i32> %a, i32 %0, i32 4)
   %2 = extractvalue { i32, <4 x i32> } %1, 0
-  store i32 %2, i32* %b, align 4
+  store i32 %2, ptr %b, align 4
   %3 = extractvalue { i32, <4 x i32> } %1, 1
   ret <4 x i32> %3
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vshlcq_u8(<16 x i8> %a, i32* nocapture %b) {
+define arm_aapcs_vfpcc <16 x i8> @test_vshlcq_u8(<16 x i8> %a, ptr nocapture %b) {
 ; CHECK-LABEL: test_vshlcq_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r1, [r0]
@@ -57,15 +57,15 @@ define arm_aapcs_vfpcc <16 x i8> @test_vshlcq_u8(<16 x i8> %a, i32* nocapture %b
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   %1 = tail call { i32, <16 x i8> } @llvm.arm.mve.vshlc.v16i8(<16 x i8> %a, i32 %0, i32 17)
   %2 = extractvalue { i32, <16 x i8> } %1, 0
-  store i32 %2, i32* %b, align 4
+  store i32 %2, ptr %b, align 4
   %3 = extractvalue { i32, <16 x i8> } %1, 1
   ret <16 x i8> %3
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vshlcq_u16(<8 x i16> %a, i32* nocapture %b) {
+define arm_aapcs_vfpcc <8 x i16> @test_vshlcq_u16(<8 x i16> %a, ptr nocapture %b) {
 ; CHECK-LABEL: test_vshlcq_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r1, [r0]
@@ -73,15 +73,15 @@ define arm_aapcs_vfpcc <8 x i16> @test_vshlcq_u16(<8 x i16> %a, i32* nocapture %
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   %1 = tail call { i32, <8 x i16> } @llvm.arm.mve.vshlc.v8i16(<8 x i16> %a, i32 %0, i32 17)
   %2 = extractvalue { i32, <8 x i16> } %1, 0
-  store i32 %2, i32* %b, align 4
+  store i32 %2, ptr %b, align 4
   %3 = extractvalue { i32, <8 x i16> } %1, 1
   ret <8 x i16> %3
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vshlcq_u32(<4 x i32> %a, i32* nocapture %b) {
+define arm_aapcs_vfpcc <4 x i32> @test_vshlcq_u32(<4 x i32> %a, ptr nocapture %b) {
 ; CHECK-LABEL: test_vshlcq_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r1, [r0]
@@ -89,15 +89,15 @@ define arm_aapcs_vfpcc <4 x i32> @test_vshlcq_u32(<4 x i32> %a, i32* nocapture %
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   %1 = tail call { i32, <4 x i32> } @llvm.arm.mve.vshlc.v4i32(<4 x i32> %a, i32 %0, i32 20)
   %2 = extractvalue { i32, <4 x i32> } %1, 0
-  store i32 %2, i32* %b, align 4
+  store i32 %2, ptr %b, align 4
   %3 = extractvalue { i32, <4 x i32> } %1, 1
   ret <4 x i32> %3
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vshlcq_m_s8(<16 x i8> %a, i32* nocapture %b, i16 zeroext %p) {
+define arm_aapcs_vfpcc <16 x i8> @test_vshlcq_m_s8(<16 x i8> %a, ptr nocapture %b, i16 zeroext %p) {
 ; CHECK-LABEL: test_vshlcq_m_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -107,17 +107,17 @@ define arm_aapcs_vfpcc <16 x i8> @test_vshlcq_m_s8(<16 x i8> %a, i32* nocapture
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
   %3 = tail call { i32, <16 x i8> } @llvm.arm.mve.vshlc.predicated.v16i8.v16i1(<16 x i8> %a, i32 %0, i32 29, <16 x i1> %2)
   %4 = extractvalue { i32, <16 x i8> } %3, 0
-  store i32 %4, i32* %b, align 4
+  store i32 %4, ptr %b, align 4
   %5 = extractvalue { i32, <16 x i8> } %3, 1
   ret <16 x i8> %5
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vshlcq_m_s16(<8 x i16> %a, i32* nocapture %b, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vshlcq_m_s16(<8 x i16> %a, ptr nocapture %b, i16 zeroext %p) {
 ; CHECK-LABEL: test_vshlcq_m_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -127,17 +127,17 @@ define arm_aapcs_vfpcc <8 x i16> @test_vshlcq_m_s16(<8 x i16> %a, i32* nocapture
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
   %3 = tail call { i32, <8 x i16> } @llvm.arm.mve.vshlc.predicated.v8i16.v8i1(<8 x i16> %a, i32 %0, i32 17, <8 x i1> %2)
   %4 = extractvalue { i32, <8 x i16> } %3, 0
-  store i32 %4, i32* %b, align 4
+  store i32 %4, ptr %b, align 4
   %5 = extractvalue { i32, <8 x i16> } %3, 1
   ret <8 x i16> %5
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vshlcq_m_s32(<4 x i32> %a, i32* nocapture %b, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vshlcq_m_s32(<4 x i32> %a, ptr nocapture %b, i16 zeroext %p) {
 ; CHECK-LABEL: test_vshlcq_m_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -147,17 +147,17 @@ define arm_aapcs_vfpcc <4 x i32> @test_vshlcq_m_s32(<4 x i32> %a, i32* nocapture
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
   %3 = tail call { i32, <4 x i32> } @llvm.arm.mve.vshlc.predicated.v4i32.v4i1(<4 x i32> %a, i32 %0, i32 9, <4 x i1> %2)
   %4 = extractvalue { i32, <4 x i32> } %3, 0
-  store i32 %4, i32* %b, align 4
+  store i32 %4, ptr %b, align 4
   %5 = extractvalue { i32, <4 x i32> } %3, 1
   ret <4 x i32> %5
 }
 
-define arm_aapcs_vfpcc <16 x i8> @test_vshlcq_m_u8(<16 x i8> %a, i32* nocapture %b, i16 zeroext %p) {
+define arm_aapcs_vfpcc <16 x i8> @test_vshlcq_m_u8(<16 x i8> %a, ptr nocapture %b, i16 zeroext %p) {
 ; CHECK-LABEL: test_vshlcq_m_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -167,17 +167,17 @@ define arm_aapcs_vfpcc <16 x i8> @test_vshlcq_m_u8(<16 x i8> %a, i32* nocapture
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %1)
   %3 = tail call { i32, <16 x i8> } @llvm.arm.mve.vshlc.predicated.v16i8.v16i1(<16 x i8> %a, i32 %0, i32 21, <16 x i1> %2)
   %4 = extractvalue { i32, <16 x i8> } %3, 0
-  store i32 %4, i32* %b, align 4
+  store i32 %4, ptr %b, align 4
   %5 = extractvalue { i32, <16 x i8> } %3, 1
   ret <16 x i8> %5
 }
 
-define arm_aapcs_vfpcc <8 x i16> @test_vshlcq_m_u16(<8 x i16> %a, i32* nocapture %b, i16 zeroext %p) {
+define arm_aapcs_vfpcc <8 x i16> @test_vshlcq_m_u16(<8 x i16> %a, ptr nocapture %b, i16 zeroext %p) {
 ; CHECK-LABEL: test_vshlcq_m_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -187,17 +187,17 @@ define arm_aapcs_vfpcc <8 x i16> @test_vshlcq_m_u16(<8 x i16> %a, i32* nocapture
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %1)
   %3 = tail call { i32, <8 x i16> } @llvm.arm.mve.vshlc.predicated.v8i16.v8i1(<8 x i16> %a, i32 %0, i32 24, <8 x i1> %2)
   %4 = extractvalue { i32, <8 x i16> } %3, 0
-  store i32 %4, i32* %b, align 4
+  store i32 %4, ptr %b, align 4
   %5 = extractvalue { i32, <8 x i16> } %3, 1
   ret <8 x i16> %5
 }
 
-define arm_aapcs_vfpcc <4 x i32> @test_vshlcq_m_u32(<4 x i32> %a, i32* nocapture %b, i16 zeroext %p) {
+define arm_aapcs_vfpcc <4 x i32> @test_vshlcq_m_u32(<4 x i32> %a, ptr nocapture %b, i16 zeroext %p) {
 ; CHECK-LABEL: test_vshlcq_m_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmsr p0, r1
@@ -207,12 +207,12 @@ define arm_aapcs_vfpcc <4 x i32> @test_vshlcq_m_u32(<4 x i32> %a, i32* nocapture
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   %1 = zext i16 %p to i32
   %2 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
   %3 = tail call { i32, <4 x i32> } @llvm.arm.mve.vshlc.predicated.v4i32.v4i1(<4 x i32> %a, i32 %0, i32 26, <4 x i1> %2)
   %4 = extractvalue { i32, <4 x i32> } %3, 0
-  store i32 %4, i32* %b, align 4
+  store i32 %4, ptr %b, align 4
   %5 = extractvalue { i32, <4 x i32> } %3, 1
   ret <4 x i32> %5
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-cost.ll b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-cost.ll
index 0eb057a3c5bd4..c95fe2296e099 100644
--- a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-cost.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-cost.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc <4 x i32> @loads_i32(<4 x i32> *%A, <4 x i32> *%B, <4 x i32> *%C) {
+define arm_aapcs_vfpcc <4 x i32> @loads_i32(ptr %A, ptr %B, ptr %C) {
 ; CHECK-LABEL: loads_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -57,9 +57,9 @@ define arm_aapcs_vfpcc <4 x i32> @loads_i32(<4 x i32> *%A, <4 x i32> *%B, <4 x i
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %a = load <4 x i32>, <4 x i32> *%A, align 4
-  %b = load <4 x i32>, <4 x i32> *%B, align 4
-  %c = load <4 x i32>, <4 x i32> *%C, align 4
+  %a = load <4 x i32>, ptr %A, align 4
+  %b = load <4 x i32>, ptr %B, align 4
+  %c = load <4 x i32>, ptr %C, align 4
   %sa = sext <4 x i32> %a to <4 x i64>
   %sb = zext <4 x i32> %b to <4 x i64>
   %sc = zext <4 x i32> %c to <4 x i64>
@@ -69,7 +69,7 @@ entry:
   ret <4 x i32> %t
 }
 
-define arm_aapcs_vfpcc <8 x i16> @loads_i16(<8 x i16> *%A, <8 x i16> *%B, <8 x i16> *%C) {
+define arm_aapcs_vfpcc <8 x i16> @loads_i16(ptr %A, ptr %B, ptr %C) {
 ; CHECK-LABEL: loads_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -90,9 +90,9 @@ define arm_aapcs_vfpcc <8 x i16> @loads_i16(<8 x i16> *%A, <8 x i16> *%B, <8 x i
 ; CHECK-NEXT:    vmovnt.i32 q0, q3
 ; CHECK-NEXT:    bx lr
 entry:
-  %a = load <8 x i16>, <8 x i16> *%A, align 4
-  %b = load <8 x i16>, <8 x i16> *%B, align 4
-  %c = load <8 x i16>, <8 x i16> *%C, align 4
+  %a = load <8 x i16>, ptr %A, align 4
+  %b = load <8 x i16>, ptr %B, align 4
+  %c = load <8 x i16>, ptr %C, align 4
   %sa = sext <8 x i16> %a to <8 x i32>
   %sb = sext <8 x i16> %b to <8 x i32>
   %sc = zext <8 x i16> %c to <8 x i32>
@@ -102,7 +102,7 @@ entry:
   ret <8 x i16> %t
 }
 
-define arm_aapcs_vfpcc <16 x i8> @loads_i8(<16 x i8> *%A, <16 x i8> *%B, <16 x i8> *%C) {
+define arm_aapcs_vfpcc <16 x i8> @loads_i8(ptr %A, ptr %B, ptr %C) {
 ; CHECK-LABEL: loads_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -123,9 +123,9 @@ define arm_aapcs_vfpcc <16 x i8> @loads_i8(<16 x i8> *%A, <16 x i8> *%B, <16 x i
 ; CHECK-NEXT:    vmovnt.i16 q0, q3
 ; CHECK-NEXT:    bx lr
 entry:
-  %a = load <16 x i8>, <16 x i8> *%A, align 4
-  %b = load <16 x i8>, <16 x i8> *%B, align 4
-  %c = load <16 x i8>, <16 x i8> *%C, align 4
+  %a = load <16 x i8>, ptr %A, align 4
+  %b = load <16 x i8>, ptr %B, align 4
+  %c = load <16 x i8>, ptr %C, align 4
   %sa = sext <16 x i8> %a to <16 x i16>
   %sb = sext <16 x i8> %b to <16 x i16>
   %sc = zext <16 x i8> %c to <16 x i16>
@@ -135,7 +135,7 @@ entry:
   ret <16 x i8> %t
 }
 
-define arm_aapcs_vfpcc void @load_store_i32(<4 x i32> *%A, <4 x i32> *%B, <4 x i32> *%C, <4 x i32> *%D) {
+define arm_aapcs_vfpcc void @load_store_i32(ptr %A, ptr %B, ptr %C, ptr %D) {
 ; CHECK-LABEL: load_store_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -195,20 +195,20 @@ define arm_aapcs_vfpcc void @load_store_i32(<4 x i32> *%A, <4 x i32> *%B, <4 x i
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %a = load <4 x i32>, <4 x i32> *%A, align 4
-  %b = load <4 x i32>, <4 x i32> *%B, align 4
-  %c = load <4 x i32>, <4 x i32> *%C, align 4
+  %a = load <4 x i32>, ptr %A, align 4
+  %b = load <4 x i32>, ptr %B, align 4
+  %c = load <4 x i32>, ptr %C, align 4
   %sa = sext <4 x i32> %a to <4 x i64>
   %sb = zext <4 x i32> %b to <4 x i64>
   %sc = zext <4 x i32> %c to <4 x i64>
   %add = add <4 x i64> %sa, %sb
   %sh = ashr <4 x i64> %add, %sc
   %t = trunc <4 x i64> %sh to <4 x i32>
-  store <4 x i32> %t, <4 x i32> *%D, align 4
+  store <4 x i32> %t, ptr %D, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @load_store_i16(<8 x i16> *%A, <8 x i16> *%B, <8 x i16> *%C, <8 x i16> *%D) {
+define arm_aapcs_vfpcc void @load_store_i16(ptr %A, ptr %B, ptr %C, ptr %D) {
 ; CHECK-LABEL: load_store_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r1, #8]
@@ -227,20 +227,20 @@ define arm_aapcs_vfpcc void @load_store_i16(<8 x i16> *%A, <8 x i16> *%B, <8 x i
 ; CHECK-NEXT:    vstrh.32 q1, [r3]
 ; CHECK-NEXT:    bx lr
 entry:
-  %a = load <8 x i16>, <8 x i16> *%A, align 4
-  %b = load <8 x i16>, <8 x i16> *%B, align 4
-  %c = load <8 x i16>, <8 x i16> *%C, align 4
+  %a = load <8 x i16>, ptr %A, align 4
+  %b = load <8 x i16>, ptr %B, align 4
+  %c = load <8 x i16>, ptr %C, align 4
   %sa = sext <8 x i16> %a to <8 x i32>
   %sb = sext <8 x i16> %b to <8 x i32>
   %sc = zext <8 x i16> %c to <8 x i32>
   %add = add <8 x i32> %sa, %sb
   %sh = ashr <8 x i32> %add, %sc
   %t = trunc <8 x i32> %sh to <8 x i16>
-  store <8 x i16> %t, <8 x i16> *%D, align 4
+  store <8 x i16> %t, ptr %D, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @load_store_i8(<16 x i8> *%A, <16 x i8> *%B, <16 x i8> *%C, <16 x i8> *%D) {
+define arm_aapcs_vfpcc void @load_store_i8(ptr %A, ptr %B, ptr %C, ptr %D) {
 ; CHECK-LABEL: load_store_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r1, #8]
@@ -259,21 +259,21 @@ define arm_aapcs_vfpcc void @load_store_i8(<16 x i8> *%A, <16 x i8> *%B, <16 x i
 ; CHECK-NEXT:    vstrb.16 q1, [r3]
 ; CHECK-NEXT:    bx lr
 entry:
-  %a = load <16 x i8>, <16 x i8> *%A, align 4
-  %b = load <16 x i8>, <16 x i8> *%B, align 4
-  %c = load <16 x i8>, <16 x i8> *%C, align 4
+  %a = load <16 x i8>, ptr %A, align 4
+  %b = load <16 x i8>, ptr %B, align 4
+  %c = load <16 x i8>, ptr %C, align 4
   %sa = sext <16 x i8> %a to <16 x i16>
   %sb = sext <16 x i8> %b to <16 x i16>
   %sc = zext <16 x i8> %c to <16 x i16>
   %add = add <16 x i16> %sa, %sb
   %sh = ashr <16 x i16> %add, %sc
   %t = trunc <16 x i16> %sh to <16 x i8>
-  store <16 x i8> %t, <16 x i8> *%D, align 4
+  store <16 x i8> %t, ptr %D, align 4
   ret void
 }
 
 
-define arm_aapcs_vfpcc void @load_one_store_i32(<4 x i32> *%A, <4 x i32> *%D) {
+define arm_aapcs_vfpcc void @load_one_store_i32(ptr %A, ptr %D) {
 ; CHECK-LABEL: load_one_store_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -307,16 +307,16 @@ define arm_aapcs_vfpcc void @load_one_store_i32(<4 x i32> *%A, <4 x i32> *%D) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %a = load <4 x i32>, <4 x i32> *%A, align 4
+  %a = load <4 x i32>, ptr %A, align 4
   %sa = sext <4 x i32> %a to <4 x i64>
   %add = add <4 x i64> %sa, %sa
   %sh = ashr <4 x i64> %add, %sa
   %t = trunc <4 x i64> %sh to <4 x i32>
-  store <4 x i32> %t, <4 x i32> *%D, align 4
+  store <4 x i32> %t, ptr %D, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @load_one_store_i16(<8 x i16> *%A, <8 x i16> *%D) {
+define arm_aapcs_vfpcc void @load_one_store_i16(ptr %A, ptr %D) {
 ; CHECK-LABEL: load_one_store_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, #8]
@@ -331,16 +331,16 @@ define arm_aapcs_vfpcc void @load_one_store_i16(<8 x i16> *%A, <8 x i16> *%D) {
 ; CHECK-NEXT:    vstrh.32 q1, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %a = load <8 x i16>, <8 x i16> *%A, align 4
+  %a = load <8 x i16>, ptr %A, align 4
   %sa = sext <8 x i16> %a to <8 x i32>
   %add = add <8 x i32> %sa, %sa
   %sh = ashr <8 x i32> %add, %sa
   %t = trunc <8 x i32> %sh to <8 x i16>
-  store <8 x i16> %t, <8 x i16> *%D, align 4
+  store <8 x i16> %t, ptr %D, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @load_one_store_i8(<16 x i8> *%A, <16 x i8> *%D) {
+define arm_aapcs_vfpcc void @load_one_store_i8(ptr %A, ptr %D) {
 ; CHECK-LABEL: load_one_store_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0, #8]
@@ -355,17 +355,17 @@ define arm_aapcs_vfpcc void @load_one_store_i8(<16 x i8> *%A, <16 x i8> *%D) {
 ; CHECK-NEXT:    vstrb.16 q1, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %a = load <16 x i8>, <16 x i8> *%A, align 4
+  %a = load <16 x i8>, ptr %A, align 4
   %sa = sext <16 x i8> %a to <16 x i16>
   %add = add <16 x i16> %sa, %sa
   %sh = ashr <16 x i16> %add, %sa
   %t = trunc <16 x i16> %sh to <16 x i8>
-  store <16 x i8> %t, <16 x i8> *%D, align 4
+  store <16 x i8> %t, ptr %D, align 4
   ret void
 }
 
 
-define arm_aapcs_vfpcc void @mul_i32(<4 x i32> *%A, <4 x i32> *%B, i64 %C, <4 x i32> *%D) {
+define arm_aapcs_vfpcc void @mul_i32(ptr %A, ptr %B, i64 %C, ptr %D) {
 ; CHECK-LABEL: mul_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -397,8 +397,8 @@ define arm_aapcs_vfpcc void @mul_i32(<4 x i32> *%A, <4 x i32> *%B, i64 %C, <4 x
 ; CHECK-NEXT:    vstrw.32 q0, [lr]
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %a = load <4 x i32>, <4 x i32> *%A, align 4
-  %b = load <4 x i32>, <4 x i32> *%B, align 4
+  %a = load <4 x i32>, ptr %A, align 4
+  %b = load <4 x i32>, ptr %B, align 4
   %i = insertelement <4 x i64> undef, i64 %C, i32 0
   %c = shufflevector <4 x i64> %i, <4 x i64> undef, <4 x i32> zeroinitializer
   %sa = sext <4 x i32> %a to <4 x i64>
@@ -406,11 +406,11 @@ entry:
   %add = mul <4 x i64> %sa, %sb
   %sh = ashr <4 x i64> %add, %c
   %t = trunc <4 x i64> %sh to <4 x i32>
-  store <4 x i32> %t, <4 x i32> *%D, align 4
+  store <4 x i32> %t, ptr %D, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @mul_i16(<8 x i16> *%A, <8 x i16> *%B, i32 %C, <8 x i16> *%D) {
+define arm_aapcs_vfpcc void @mul_i16(ptr %A, ptr %B, i32 %C, ptr %D) {
 ; CHECK-LABEL: mul_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -424,8 +424,8 @@ define arm_aapcs_vfpcc void @mul_i16(<8 x i16> *%A, <8 x i16> *%B, i32 %C, <8 x
 ; CHECK-NEXT:    vstrw.32 q0, [r3]
 ; CHECK-NEXT:    bx lr
 entry:
-  %a = load <8 x i16>, <8 x i16> *%A, align 4
-  %b = load <8 x i16>, <8 x i16> *%B, align 4
+  %a = load <8 x i16>, ptr %A, align 4
+  %b = load <8 x i16>, ptr %B, align 4
   %i = insertelement <8 x i32> undef, i32 %C, i32 0
   %c = shufflevector <8 x i32> %i, <8 x i32> undef, <8 x i32> zeroinitializer
   %sa = sext <8 x i16> %a to <8 x i32>
@@ -433,11 +433,11 @@ entry:
   %add = mul <8 x i32> %sa, %sb
   %sh = ashr <8 x i32> %add, %c
   %t = trunc <8 x i32> %sh to <8 x i16>
-  store <8 x i16> %t, <8 x i16> *%D, align 4
+  store <8 x i16> %t, ptr %D, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @mul_i8(<16 x i8> *%A, <16 x i8> *%B, i16 %C, <16 x i8> *%D) {
+define arm_aapcs_vfpcc void @mul_i8(ptr %A, ptr %B, i16 %C, ptr %D) {
 ; CHECK-LABEL: mul_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -451,8 +451,8 @@ define arm_aapcs_vfpcc void @mul_i8(<16 x i8> *%A, <16 x i8> *%B, i16 %C, <16 x
 ; CHECK-NEXT:    vstrw.32 q0, [r3]
 ; CHECK-NEXT:    bx lr
 entry:
-  %a = load <16 x i8>, <16 x i8> *%A, align 4
-  %b = load <16 x i8>, <16 x i8> *%B, align 4
+  %a = load <16 x i8>, ptr %A, align 4
+  %b = load <16 x i8>, ptr %B, align 4
   %i = insertelement <16 x i16> undef, i16 %C, i32 0
   %c = shufflevector <16 x i16> %i, <16 x i16> undef, <16 x i32> zeroinitializer
   %sa = sext <16 x i8> %a to <16 x i16>
@@ -460,6 +460,6 @@ entry:
   %add = mul <16 x i16> %sa, %sb
   %sh = ashr <16 x i16> %add, %c
   %t = trunc <16 x i16> %sh to <16 x i8>
-  store <16 x i8> %t, <16 x i8> *%D, align 4
+  store <16 x i8> %t, ptr %D, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-ldst-offset.ll b/llvm/test/CodeGen/Thumb2/mve-ldst-offset.ll
index 1e940ad826572..4f9ca257b987c 100644
--- a/llvm/test/CodeGen/Thumb2/mve-ldst-offset.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-ldst-offset.ll
@@ -2,22 +2,20 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE
 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
 
-define i8* @ldrwu32_4(i8* %x, i8* %y) {
+define ptr @ldrwu32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #4]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwu32_3(i8* %x, i8* %y) {
+define ptr @ldrwu32_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r2, r0, #3
@@ -25,45 +23,39 @@ define i8* @ldrwu32_3(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwu32_m4(i8* %x, i8* %y) {
+define ptr @ldrwu32_m4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_m4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #-4]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -4
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -4
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwu32_508(i8* %x, i8* %y) {
+define ptr @ldrwu32_508(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #508]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 508
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 508
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwu32_512(i8* %x, i8* %y) {
+define ptr @ldrwu32_512(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r2, r0, #512
@@ -71,30 +63,26 @@ define i8* @ldrwu32_512(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 512
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 512
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwu32_m508(i8* %x, i8* %y) {
+define ptr @ldrwu32_m508(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #-508]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -508
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -508
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwu32_m512(i8* %x, i8* %y) {
+define ptr @ldrwu32_m512(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r2, r0, #512
@@ -102,32 +90,28 @@ define i8* @ldrwu32_m512(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -512
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -512
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
 
-define i8* @ldrhu32_4(i8* %x, i8* %y) {
+define ptr @ldrhu32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, #4]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu32_3(i8* %x, i8* %y) {
+define ptr @ldrhu32_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r2, r0, #3
@@ -135,48 +119,42 @@ define i8* @ldrhu32_3(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu32_2(i8* %x, i8* %y) {
+define ptr @ldrhu32_2(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, #2]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu32_254(i8* %x, i8* %y) {
+define ptr @ldrhu32_254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, #254]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu32_256(i8* %x, i8* %y) {
+define ptr @ldrhu32_256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r2, r0, #256
@@ -184,32 +162,28 @@ define i8* @ldrhu32_256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu32_m254(i8* %x, i8* %y) {
+define ptr @ldrhu32_m254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, #-254]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu32_m256(i8* %x, i8* %y) {
+define ptr @ldrhu32_m256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r2, r0, #256
@@ -217,33 +191,29 @@ define i8* @ldrhu32_m256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
 
-define i8* @ldrhs32_4(i8* %x, i8* %y) {
+define ptr @ldrhs32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, #4]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhs32_3(i8* %x, i8* %y) {
+define ptr @ldrhs32_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r2, r0, #3
@@ -251,48 +221,42 @@ define i8* @ldrhs32_3(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhs32_2(i8* %x, i8* %y) {
+define ptr @ldrhs32_2(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, #2]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhs32_254(i8* %x, i8* %y) {
+define ptr @ldrhs32_254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, #254]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhs32_256(i8* %x, i8* %y) {
+define ptr @ldrhs32_256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r2, r0, #256
@@ -300,32 +264,28 @@ define i8* @ldrhs32_256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhs32_m254(i8* %x, i8* %y) {
+define ptr @ldrhs32_m254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, #-254]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhs32_m256(i8* %x, i8* %y) {
+define ptr @ldrhs32_m256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r2, r0, #256
@@ -333,32 +293,28 @@ define i8* @ldrhs32_m256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
 
-define i8* @ldrhu16_4(i8* %x, i8* %y) {
+define ptr @ldrhu16_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, #4]
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhu16_3(i8* %x, i8* %y) {
+define ptr @ldrhu16_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r2, r0, #3
@@ -366,45 +322,39 @@ define i8* @ldrhu16_3(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhu16_2(i8* %x, i8* %y) {
+define ptr @ldrhu16_2(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, #2]
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhu16_254(i8* %x, i8* %y) {
+define ptr @ldrhu16_254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, #254]
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhu16_256(i8* %x, i8* %y) {
+define ptr @ldrhu16_256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r2, r0, #256
@@ -412,30 +362,26 @@ define i8* @ldrhu16_256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhu16_m254(i8* %x, i8* %y) {
+define ptr @ldrhu16_m254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, #-254]
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhu16_m256(i8* %x, i8* %y) {
+define ptr @ldrhu16_m256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r2, r0, #256
@@ -443,64 +389,56 @@ define i8* @ldrhu16_m256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
 
-define i8* @ldrbu32_4(i8* %x, i8* %y) {
+define ptr @ldrbu32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, #4]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbu32_3(i8* %x, i8* %y) {
+define ptr @ldrbu32_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, #3]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbu32_127(i8* %x, i8* %y) {
+define ptr @ldrbu32_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, #127]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbu32_128(i8* %x, i8* %y) {
+define ptr @ldrbu32_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r2, r0, #128
@@ -508,32 +446,28 @@ define i8* @ldrbu32_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbu32_m127(i8* %x, i8* %y) {
+define ptr @ldrbu32_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, #-127]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbu32_m128(i8* %x, i8* %y) {
+define ptr @ldrbu32_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r2, r0, #128
@@ -541,65 +475,57 @@ define i8* @ldrbu32_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
 
-define i8* @ldrbs32_4(i8* %x, i8* %y) {
+define ptr @ldrbs32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0, #4]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbs32_3(i8* %x, i8* %y) {
+define ptr @ldrbs32_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0, #3]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbs32_127(i8* %x, i8* %y) {
+define ptr @ldrbs32_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0, #127]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbs32_128(i8* %x, i8* %y) {
+define ptr @ldrbs32_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r2, r0, #128
@@ -607,32 +533,28 @@ define i8* @ldrbs32_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbs32_m127(i8* %x, i8* %y) {
+define ptr @ldrbs32_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0, #-127]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbs32_m128(i8* %x, i8* %y) {
+define ptr @ldrbs32_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r2, r0, #128
@@ -640,65 +562,57 @@ define i8* @ldrbs32_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
 
-define i8* @ldrbu16_4(i8* %x, i8* %y) {
+define ptr @ldrbu16_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0, #4]
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbu16_3(i8* %x, i8* %y) {
+define ptr @ldrbu16_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0, #3]
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbu16_127(i8* %x, i8* %y) {
+define ptr @ldrbu16_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0, #127]
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbu16_128(i8* %x, i8* %y) {
+define ptr @ldrbu16_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r2, r0, #128
@@ -706,32 +620,28 @@ define i8* @ldrbu16_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbu16_m127(i8* %x, i8* %y) {
+define ptr @ldrbu16_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0, #-127]
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbu16_m128(i8* %x, i8* %y) {
+define ptr @ldrbu16_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r2, r0, #128
@@ -739,65 +649,57 @@ define i8* @ldrbu16_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
 
-define i8* @ldrbs16_4(i8* %x, i8* %y) {
+define ptr @ldrbs16_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0, #4]
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbs16_3(i8* %x, i8* %y) {
+define ptr @ldrbs16_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0, #3]
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbs16_127(i8* %x, i8* %y) {
+define ptr @ldrbs16_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0, #127]
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbs16_128(i8* %x, i8* %y) {
+define ptr @ldrbs16_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r2, r0, #128
@@ -805,32 +707,28 @@ define i8* @ldrbs16_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbs16_m127(i8* %x, i8* %y) {
+define ptr @ldrbs16_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0, #-127]
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbs16_m128(i8* %x, i8* %y) {
+define ptr @ldrbs16_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r2, r0, #128
@@ -838,62 +736,54 @@ define i8* @ldrbs16_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
 
-define i8* @ldrbu8_4(i8* %x, i8* %y) {
+define ptr @ldrbu8_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0, #4]
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <16 x i8>, ptr %z, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %x
 }
 
-define i8* @ldrbu8_3(i8* %x, i8* %y) {
+define ptr @ldrbu8_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0, #3]
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <16 x i8>, ptr %z, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %x
 }
 
-define i8* @ldrbu8_127(i8* %x, i8* %y) {
+define ptr @ldrbu8_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0, #127]
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <16 x i8>, ptr %z, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %x
 }
 
-define i8* @ldrbu8_128(i8* %x, i8* %y) {
+define ptr @ldrbu8_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r2, r0, #128
@@ -901,30 +791,26 @@ define i8* @ldrbu8_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <16 x i8>, ptr %z, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %x
 }
 
-define i8* @ldrbu8_m127(i8* %x, i8* %y) {
+define ptr @ldrbu8_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0, #-127]
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <16 x i8>, ptr %z, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %x
 }
 
-define i8* @ldrbu8_m128(i8* %x, i8* %y) {
+define ptr @ldrbu8_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r2, r0, #128
@@ -932,46 +818,40 @@ define i8* @ldrbu8_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <16 x i8>, ptr %z, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %x
 }
 
 
-define i8* @ldrwf32_4(i8* %x, i8* %y) {
+define ptr @ldrwf32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwf32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #4]
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x float>, ptr %z, align 4
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwf16_4(i8* %x, i8* %y) {
+define ptr @ldrwf16_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwf16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, #4]
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x half>, ptr %z, align 2
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrwi32_align1(i8* %x, i8* %y) {
+define ptr @ldrwi32_align1(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrwi32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0, #3]
@@ -985,15 +865,13 @@ define i8* @ldrwi32_align1(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i32>, ptr %z, align 1
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhi16_align1(i8* %x, i8* %y) {
+define ptr @ldrhi16_align1(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrhi16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0, #3]
@@ -1007,15 +885,13 @@ define i8* @ldrhi16_align1(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x i16>, ptr %z, align 1
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhi32_align1(i8* %x, i8* %y) {
+define ptr @ldrhi32_align1(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhi32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #8
@@ -1029,16 +905,14 @@ define i8* @ldrhi32_align1(i8* %x, i8* %y) {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i32>*
-  %3 = sext <4 x i16> %1 to <4 x i32>
-  store <4 x i32> %3, <4 x i32>* %2, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i16>, ptr %z, align 1
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrf32_align1(i8* %x, i8* %y) {
+define ptr @ldrf32_align1(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrf32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0, #3]
@@ -1052,15 +926,13 @@ define i8* @ldrf32_align1(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 1
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x float>, ptr %z, align 1
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrf16_align1(i8* %x, i8* %y) {
+define ptr @ldrf16_align1(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrf16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0, #3]
@@ -1074,15 +946,13 @@ define i8* @ldrf16_align1(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 1
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x half>, ptr %z, align 1
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrh16_align8(i8* %x, i8* %y) {
+define ptr @ldrh16_align8(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrh16_align8:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r0, #4]
@@ -1095,34 +965,30 @@ define i8* @ldrh16_align8(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 8
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x i16>, ptr %z, align 8
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
 
 
 
 
-define i8* @strw32_4(i8* %y, i8* %x) {
+define ptr @strw32_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %y
 }
 
-define i8* @strw32_3(i8* %y, i8* %x) {
+define ptr @strw32_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -1130,45 +996,39 @@ define i8* @strw32_3(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %y
 }
 
-define i8* @strw32_m4(i8* %y, i8* %x) {
+define ptr @strw32_m4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_m4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, #-4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 -4
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %y
 }
 
-define i8* @strw32_508(i8* %y, i8* %x) {
+define ptr @strw32_508(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, #508]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 508
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %y
 }
 
-define i8* @strw32_512(i8* %y, i8* %x) {
+define ptr @strw32_512(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -1176,30 +1036,26 @@ define i8* @strw32_512(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 512
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %y
 }
 
-define i8* @strw32_m508(i8* %y, i8* %x) {
+define ptr @strw32_m508(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, #-508]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 -508
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %y
 }
 
-define i8* @strw32_m512(i8* %y, i8* %x) {
+define ptr @strw32_m512(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -1207,31 +1063,27 @@ define i8* @strw32_m512(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 -512
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %y
 }
 
 
-define i8* @strh32_4(i8* %y, i8* %x) {
+define ptr @strh32_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
-define i8* @strh32_3(i8* %y, i8* %x) {
+define ptr @strh32_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
@@ -1239,45 +1091,39 @@ define i8* @strh32_3(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrh.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
-define i8* @strh32_2(i8* %y, i8* %x) {
+define ptr @strh32_2(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
-define i8* @strh32_254(i8* %y, i8* %x) {
+define ptr @strh32_254(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, #254]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
-define i8* @strh32_256(i8* %y, i8* %x) {
+define ptr @strh32_256(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
@@ -1285,30 +1131,26 @@ define i8* @strh32_256(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrh.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
-define i8* @strh32_m254(i8* %y, i8* %x) {
+define ptr @strh32_m254(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, #-254]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
-define i8* @strh32_m256(i8* %y, i8* %x) {
+define ptr @strh32_m256(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
@@ -1316,31 +1158,27 @@ define i8* @strh32_m256(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrh.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
 
-define i8* @strh16_4(i8* %y, i8* %x) {
+define ptr @strh16_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
-define i8* @strh16_3(i8* %y, i8* %x) {
+define ptr @strh16_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
@@ -1348,45 +1186,39 @@ define i8* @strh16_3(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
-define i8* @strh16_2(i8* %y, i8* %x) {
+define ptr @strh16_2(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
-define i8* @strh16_254(i8* %y, i8* %x) {
+define ptr @strh16_254(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, #254]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
-define i8* @strh16_256(i8* %y, i8* %x) {
+define ptr @strh16_256(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
@@ -1394,30 +1226,26 @@ define i8* @strh16_256(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
-define i8* @strh16_m254(i8* %y, i8* %x) {
+define ptr @strh16_m254(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, #-254]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
-define i8* @strh16_m256(i8* %y, i8* %x) {
+define ptr @strh16_m256(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
@@ -1425,61 +1253,53 @@ define i8* @strh16_m256(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %y
 }
 
 
-define i8* @strb32_4(i8* %y, i8* %x) {
+define ptr @strb32_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb32_3(i8* %y, i8* %x) {
+define ptr @strb32_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0, #3]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb32_127(i8* %y, i8* %x) {
+define ptr @strb32_127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0, #127]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb32_128(i8* %y, i8* %x) {
+define ptr @strb32_128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
@@ -1487,30 +1307,26 @@ define i8* @strb32_128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrb.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb32_m127(i8* %y, i8* %x) {
+define ptr @strb32_m127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0, #-127]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb32_m128(i8* %y, i8* %x) {
+define ptr @strb32_m128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
@@ -1518,61 +1334,53 @@ define i8* @strb32_m128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrb.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
 
-define i8* @strb16_4(i8* %y, i8* %x) {
+define ptr @strb16_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb16_3(i8* %y, i8* %x) {
+define ptr @strb16_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0, #3]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb16_127(i8* %y, i8* %x) {
+define ptr @strb16_127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0, #127]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb16_128(i8* %y, i8* %x) {
+define ptr @strb16_128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
@@ -1580,30 +1388,26 @@ define i8* @strb16_128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrb.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb16_m127(i8* %y, i8* %x) {
+define ptr @strb16_m127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0, #-127]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb16_m128(i8* %y, i8* %x) {
+define ptr @strb16_m128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
@@ -1611,61 +1415,53 @@ define i8* @strb16_m128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrb.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
 
-define i8* @strb8_4(i8* %y, i8* %x) {
+define ptr @strb8_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
 ; CHECK-NEXT:    vstrb.8 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb8_3(i8* %y, i8* %x) {
+define ptr @strb8_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
 ; CHECK-NEXT:    vstrb.8 q0, [r0, #3]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb8_127(i8* %y, i8* %x) {
+define ptr @strb8_127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
 ; CHECK-NEXT:    vstrb.8 q0, [r0, #127]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb8_128(i8* %y, i8* %x) {
+define ptr @strb8_128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
@@ -1673,30 +1469,26 @@ define i8* @strb8_128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb8_m127(i8* %y, i8* %x) {
+define ptr @strb8_m127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
 ; CHECK-NEXT:    vstrb.8 q0, [r0, #-127]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strb8_m128(i8* %y, i8* %x) {
+define ptr @strb8_m128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
@@ -1704,46 +1496,40 @@ define i8* @strb8_m128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %z, align 1
+  ret ptr %y
 }
 
 
-define i8* @strf32_4(i8* %y, i8* %x) {
+define ptr @strf32_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strf32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x float>, ptr %x, align 4
+  store <4 x float> %0, ptr %z, align 4
+  ret ptr %y
 }
 
-define i8* @strf16_4(i8* %y, i8* %x) {
+define ptr @strf16_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strf16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <8 x half>, ptr %x, align 2
+  store <8 x half> %0, ptr %z, align 2
+  ret ptr %y
 }
 
-define i8* @strwi32_align1(i8* %y, i8* %x) {
+define ptr @strwi32_align1(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strwi32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1757,15 +1543,13 @@ define i8* @strwi32_align1(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrb.8 q0, [r0, #3]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strhi16_align1(i8* %y, i8* %x) {
+define ptr @strhi16_align1(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strhi16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1779,15 +1563,13 @@ define i8* @strhi16_align1(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrb.8 q0, [r0, #3]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strhi32_align1(i8* %y, i8* %x) {
+define ptr @strhi32_align1(ptr %y, ptr %x) {
 ; CHECK-LABEL: strhi32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #8
@@ -1801,16 +1583,14 @@ define i8* @strhi32_align1(i8* %y, i8* %x) {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i16>*
-  %3 = trunc <4 x i32> %1 to <4 x i16>
-  store <4 x i16> %3, <4 x i16>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i32>, ptr %x, align 4
+  %1 = trunc <4 x i32> %0 to <4 x i16>
+  store <4 x i16> %1, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strf32_align1(i8* %y, i8* %x) {
+define ptr @strf32_align1(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strf32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1824,15 +1604,13 @@ define i8* @strf32_align1(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrb.8 q0, [r0, #3]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x float>, ptr %x, align 4
+  store <4 x float> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strf16_align1(i8* %y, i8* %x) {
+define ptr @strf16_align1(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strf16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1846,15 +1624,13 @@ define i8* @strf16_align1(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrb.8 q0, [r0, #3]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 1
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <8 x half>, ptr %x, align 2
+  store <8 x half> %0, ptr %z, align 1
+  ret ptr %y
 }
 
-define i8* @strf16_align8(i8* %y, i8* %x) {
+define ptr @strf16_align8(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strf16_align8:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1867,10 +1643,8 @@ define i8* @strf16_align8(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r0, #16]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 16
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 8
-  ret i8* %y
+  %z = getelementptr inbounds i8, ptr %y, i32 16
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 8
+  ret ptr %y
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-ldst-postinc.ll b/llvm/test/CodeGen/Thumb2/mve-ldst-postinc.ll
index 905f7de193aea..a9be723dd9107 100644
--- a/llvm/test/CodeGen/Thumb2/mve-ldst-postinc.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-ldst-postinc.ll
@@ -2,22 +2,20 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE
 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
 
-define i8* @ldrwu32_4(i8* %x, i8* %y) {
+define ptr @ldrwu32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #4
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_3(i8* %x, i8* %y) {
+define ptr @ldrwu32_3(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrwu32_3:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0], #3
@@ -31,45 +29,39 @@ define i8* @ldrwu32_3(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_m4(i8* %x, i8* %y) {
+define ptr @ldrwu32_m4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_m4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #-4
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -4
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_508(i8* %x, i8* %y) {
+define ptr @ldrwu32_508(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #508
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 508
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_512(i8* %x, i8* %y) {
+define ptr @ldrwu32_512(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -77,30 +69,26 @@ define i8* @ldrwu32_512(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 512
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_m508(i8* %x, i8* %y) {
+define ptr @ldrwu32_m508(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #-508
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -508
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_m512(i8* %x, i8* %y) {
+define ptr @ldrwu32_m512(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -108,32 +96,28 @@ define i8* @ldrwu32_m512(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -512
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define i8* @ldrhu32_4(i8* %x, i8* %y) {
+define ptr @ldrhu32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0], #4
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_3(i8* %x, i8* %y) {
+define ptr @ldrhu32_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0]
@@ -141,48 +125,42 @@ define i8* @ldrhu32_3(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_2(i8* %x, i8* %y) {
+define ptr @ldrhu32_2(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0], #2
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_254(i8* %x, i8* %y) {
+define ptr @ldrhu32_254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0], #254
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_256(i8* %x, i8* %y) {
+define ptr @ldrhu32_256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0]
@@ -190,32 +168,28 @@ define i8* @ldrhu32_256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_m254(i8* %x, i8* %y) {
+define ptr @ldrhu32_m254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0], #-254
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_m256(i8* %x, i8* %y) {
+define ptr @ldrhu32_m256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0]
@@ -223,33 +197,29 @@ define i8* @ldrhu32_m256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define i8* @ldrhs32_4(i8* %x, i8* %y) {
+define ptr @ldrhs32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0], #4
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_3(i8* %x, i8* %y) {
+define ptr @ldrhs32_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0]
@@ -257,48 +227,42 @@ define i8* @ldrhs32_3(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_2(i8* %x, i8* %y) {
+define ptr @ldrhs32_2(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0], #2
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_254(i8* %x, i8* %y) {
+define ptr @ldrhs32_254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0], #254
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_256(i8* %x, i8* %y) {
+define ptr @ldrhs32_256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0]
@@ -306,32 +270,28 @@ define i8* @ldrhs32_256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_m254(i8* %x, i8* %y) {
+define ptr @ldrhs32_m254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0], #-254
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_m256(i8* %x, i8* %y) {
+define ptr @ldrhs32_m256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0]
@@ -339,32 +299,28 @@ define i8* @ldrhs32_m256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define i8* @ldrhu16_4(i8* %x, i8* %y) {
+define ptr @ldrhu16_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0], #4
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_3(i8* %x, i8* %y) {
+define ptr @ldrhu16_3(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrhu16_3:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0], #3
@@ -378,45 +334,39 @@ define i8* @ldrhu16_3(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_2(i8* %x, i8* %y) {
+define ptr @ldrhu16_2(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0], #2
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_254(i8* %x, i8* %y) {
+define ptr @ldrhu16_254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0], #254
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_256(i8* %x, i8* %y) {
+define ptr @ldrhu16_256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -424,30 +374,26 @@ define i8* @ldrhu16_256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_m254(i8* %x, i8* %y) {
+define ptr @ldrhu16_m254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0], #-254
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_m256(i8* %x, i8* %y) {
+define ptr @ldrhu16_m256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -455,64 +401,56 @@ define i8* @ldrhu16_m256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
 
-define i8* @ldrbu32_4(i8* %x, i8* %y) {
+define ptr @ldrbu32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0], #4
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_3(i8* %x, i8* %y) {
+define ptr @ldrbu32_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0], #3
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_127(i8* %x, i8* %y) {
+define ptr @ldrbu32_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0], #127
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_128(i8* %x, i8* %y) {
+define ptr @ldrbu32_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0]
@@ -520,32 +458,28 @@ define i8* @ldrbu32_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_m127(i8* %x, i8* %y) {
+define ptr @ldrbu32_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0], #-127
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_m128(i8* %x, i8* %y) {
+define ptr @ldrbu32_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0]
@@ -553,65 +487,57 @@ define i8* @ldrbu32_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define i8* @ldrbs32_4(i8* %x, i8* %y) {
+define ptr @ldrbs32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0], #4
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_3(i8* %x, i8* %y) {
+define ptr @ldrbs32_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0], #3
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_127(i8* %x, i8* %y) {
+define ptr @ldrbs32_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0], #127
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_128(i8* %x, i8* %y) {
+define ptr @ldrbs32_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0]
@@ -619,32 +545,28 @@ define i8* @ldrbs32_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_m127(i8* %x, i8* %y) {
+define ptr @ldrbs32_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0], #-127
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_m128(i8* %x, i8* %y) {
+define ptr @ldrbs32_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0]
@@ -652,65 +574,57 @@ define i8* @ldrbs32_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define i8* @ldrbu16_4(i8* %x, i8* %y) {
+define ptr @ldrbu16_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0], #4
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_3(i8* %x, i8* %y) {
+define ptr @ldrbu16_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0], #3
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_127(i8* %x, i8* %y) {
+define ptr @ldrbu16_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0], #127
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_128(i8* %x, i8* %y) {
+define ptr @ldrbu16_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0]
@@ -718,32 +632,28 @@ define i8* @ldrbu16_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_m127(i8* %x, i8* %y) {
+define ptr @ldrbu16_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0], #-127
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_m128(i8* %x, i8* %y) {
+define ptr @ldrbu16_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0]
@@ -751,65 +661,57 @@ define i8* @ldrbu16_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
 
-define i8* @ldrbs16_4(i8* %x, i8* %y) {
+define ptr @ldrbs16_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0], #4
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_3(i8* %x, i8* %y) {
+define ptr @ldrbs16_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0], #3
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_127(i8* %x, i8* %y) {
+define ptr @ldrbs16_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0], #127
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_128(i8* %x, i8* %y) {
+define ptr @ldrbs16_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0]
@@ -817,32 +719,28 @@ define i8* @ldrbs16_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_m127(i8* %x, i8* %y) {
+define ptr @ldrbs16_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0], #-127
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_m128(i8* %x, i8* %y) {
+define ptr @ldrbs16_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0]
@@ -850,62 +748,54 @@ define i8* @ldrbs16_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
 
-define i8* @ldrbu8_4(i8* %x, i8* %y) {
+define ptr @ldrbu8_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0], #4
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_3(i8* %x, i8* %y) {
+define ptr @ldrbu8_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0], #3
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_127(i8* %x, i8* %y) {
+define ptr @ldrbu8_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0], #127
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_128(i8* %x, i8* %y) {
+define ptr @ldrbu8_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -913,30 +803,26 @@ define i8* @ldrbu8_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_m127(i8* %x, i8* %y) {
+define ptr @ldrbu8_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0], #-127
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_m128(i8* %x, i8* %y) {
+define ptr @ldrbu8_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -944,46 +830,40 @@ define i8* @ldrbu8_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
 
-define i8* @ldrwf32_4(i8* %x, i8* %y) {
+define ptr @ldrwf32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwf32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0], #4
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x float>, ptr %x, align 4
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf16_4(i8* %x, i8* %y) {
+define ptr @ldrwf16_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwf16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0], #4
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x half>, ptr %x, align 2
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrwi32_align1(i8* %x, i8* %y) {
+define ptr @ldrwi32_align1(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrwi32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0], #3
@@ -997,15 +877,13 @@ define i8* @ldrwi32_align1(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i32>, ptr %x, align 1
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhi16_align1(i8* %x, i8* %y) {
+define ptr @ldrhi16_align1(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrhi16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0], #3
@@ -1019,15 +897,13 @@ define i8* @ldrhi16_align1(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x i16>, ptr %x, align 1
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhi32_align1(i8* %x, i8* %y) {
+define ptr @ldrhi32_align1(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhi32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #8
@@ -1042,16 +918,14 @@ define i8* @ldrhi32_align1(i8* %x, i8* %y) {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i32>*
-  %3 = sext <4 x i16> %1 to <4 x i32>
-  store <4 x i32> %3, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i16>, ptr %x, align 1
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrf32_align1(i8* %x, i8* %y) {
+define ptr @ldrf32_align1(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrf32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0], #3
@@ -1065,15 +939,13 @@ define i8* @ldrf32_align1(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 1
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x float>, ptr %x, align 1
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrf16_align1(i8* %x, i8* %y) {
+define ptr @ldrf16_align1(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrf16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0], #3
@@ -1087,15 +959,13 @@ define i8* @ldrf16_align1(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 1
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x half>, ptr %x, align 1
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrh16_align8(i8* %x, i8* %y) {
+define ptr @ldrh16_align8(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrh16_align8:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r0], #4
@@ -1108,19 +978,17 @@ define i8* @ldrh16_align8(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 8
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x i16>, ptr %x, align 8
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
 
 
 
 
-define i8* @strw32_4(i8* %y, i8* %x) {
+define ptr @strw32_4(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strw32_4:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1133,15 +1001,13 @@ define i8* @strw32_4(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r0], #4
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @strw32_3(i8* %y, i8* %x) {
+define ptr @strw32_3(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strw32_3:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1155,15 +1021,13 @@ define i8* @strw32_3(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    adds r0, #3
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @strw32_m4(i8* %y, i8* %x) {
+define ptr @strw32_m4(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strw32_m4:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1176,30 +1040,26 @@ define i8* @strw32_m4(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r0], #-4
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -4
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @strw32_508(i8* %y, i8* %x) {
+define ptr @strw32_508(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0], #508
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 508
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @strw32_512(i8* %y, i8* %x) {
+define ptr @strw32_512(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -1207,30 +1067,26 @@ define i8* @strw32_512(i8* %y, i8* %x) {
 ; CHECK-NEXT:    add.w r0, r0, #512
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 512
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @strw32_m508(i8* %y, i8* %x) {
+define ptr @strw32_m508(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0], #-508
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -508
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @strw32_m512(i8* %y, i8* %x) {
+define ptr @strw32_m512(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -1238,31 +1094,27 @@ define i8* @strw32_m512(i8* %y, i8* %x) {
 ; CHECK-NEXT:    sub.w r0, r0, #512
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -512
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define i8* @strh32_4(i8* %y, i8* %x) {
+define ptr @strh32_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0], #4
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @strh32_3(i8* %y, i8* %x) {
+define ptr @strh32_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
@@ -1270,45 +1122,39 @@ define i8* @strh32_3(i8* %y, i8* %x) {
 ; CHECK-NEXT:    adds r0, #3
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @strh32_2(i8* %y, i8* %x) {
+define ptr @strh32_2(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0], #2
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @strh32_254(i8* %y, i8* %x) {
+define ptr @strh32_254(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0], #254
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @strh32_256(i8* %y, i8* %x) {
+define ptr @strh32_256(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
@@ -1316,30 +1162,26 @@ define i8* @strh32_256(i8* %y, i8* %x) {
 ; CHECK-NEXT:    add.w r0, r0, #256
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @strh32_m254(i8* %y, i8* %x) {
+define ptr @strh32_m254(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0], #-254
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @strh32_m256(i8* %y, i8* %x) {
+define ptr @strh32_m256(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
@@ -1347,16 +1189,14 @@ define i8* @strh32_m256(i8* %y, i8* %x) {
 ; CHECK-NEXT:    sub.w r0, r0, #256
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
 
-define i8* @strh16_4(i8* %y, i8* %x) {
+define ptr @strh16_4(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strh16_4:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1369,15 +1209,13 @@ define i8* @strh16_4(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r0], #4
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @strh16_3(i8* %y, i8* %x) {
+define ptr @strh16_3(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strh16_3:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1391,15 +1229,13 @@ define i8* @strh16_3(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    adds r0, #3
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @strh16_2(i8* %y, i8* %x) {
+define ptr @strh16_2(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strh16_2:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1412,30 +1248,26 @@ define i8* @strh16_2(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r0], #2
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @strh16_254(i8* %y, i8* %x) {
+define ptr @strh16_254(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0], #254
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @strh16_256(i8* %y, i8* %x) {
+define ptr @strh16_256(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
@@ -1443,30 +1275,26 @@ define i8* @strh16_256(i8* %y, i8* %x) {
 ; CHECK-NEXT:    add.w r0, r0, #256
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @strh16_m254(i8* %y, i8* %x) {
+define ptr @strh16_m254(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0], #-254
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @strh16_m256(i8* %y, i8* %x) {
+define ptr @strh16_m256(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
@@ -1474,61 +1302,53 @@ define i8* @strh16_m256(i8* %y, i8* %x) {
 ; CHECK-NEXT:    sub.w r0, r0, #256
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
 
-define i8* @strb32_4(i8* %y, i8* %x) {
+define ptr @strb32_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0], #4
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb32_3(i8* %y, i8* %x) {
+define ptr @strb32_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0], #3
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb32_127(i8* %y, i8* %x) {
+define ptr @strb32_127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0], #127
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb32_128(i8* %y, i8* %x) {
+define ptr @strb32_128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
@@ -1536,30 +1356,26 @@ define i8* @strb32_128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    adds r0, #128
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb32_m127(i8* %y, i8* %x) {
+define ptr @strb32_m127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0], #-127
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb32_m128(i8* %y, i8* %x) {
+define ptr @strb32_m128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
@@ -1567,61 +1383,53 @@ define i8* @strb32_m128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    subs r0, #128
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
 
-define i8* @strb16_4(i8* %y, i8* %x) {
+define ptr @strb16_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0], #4
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb16_3(i8* %y, i8* %x) {
+define ptr @strb16_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0], #3
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb16_127(i8* %y, i8* %x) {
+define ptr @strb16_127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0], #127
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb16_128(i8* %y, i8* %x) {
+define ptr @strb16_128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
@@ -1629,30 +1437,26 @@ define i8* @strb16_128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    adds r0, #128
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb16_m127(i8* %y, i8* %x) {
+define ptr @strb16_m127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0], #-127
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb16_m128(i8* %y, i8* %x) {
+define ptr @strb16_m128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
@@ -1660,61 +1464,53 @@ define i8* @strb16_m128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    subs r0, #128
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
 
-define i8* @strb8_4(i8* %y, i8* %x) {
+define ptr @strb8_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
 ; CHECK-NEXT:    vstrb.8 q0, [r0], #4
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb8_3(i8* %y, i8* %x) {
+define ptr @strb8_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
 ; CHECK-NEXT:    vstrb.8 q0, [r0], #3
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb8_127(i8* %y, i8* %x) {
+define ptr @strb8_127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
 ; CHECK-NEXT:    vstrb.8 q0, [r0], #127
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb8_128(i8* %y, i8* %x) {
+define ptr @strb8_128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
@@ -1722,30 +1518,26 @@ define i8* @strb8_128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    adds r0, #128
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb8_m127(i8* %y, i8* %x) {
+define ptr @strb8_m127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
 ; CHECK-NEXT:    vstrb.8 q0, [r0], #-127
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strb8_m128(i8* %y, i8* %x) {
+define ptr @strb8_m128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
@@ -1753,16 +1545,14 @@ define i8* @strb8_m128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    subs r0, #128
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
 
-define i8* @strf32_4(i8* %y, i8* %x) {
+define ptr @strf32_4(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strf32_4:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1775,15 +1565,13 @@ define i8* @strf32_4(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r0], #4
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x float>, ptr %x, align 4
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @strf16_4(i8* %y, i8* %x) {
+define ptr @strf16_4(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strf16_4:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1796,15 +1584,13 @@ define i8* @strf16_4(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r0], #4
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <8 x half>, ptr %x, align 2
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @strwi32_align1(i8* %y, i8* %x) {
+define ptr @strwi32_align1(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strwi32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1818,15 +1604,13 @@ define i8* @strwi32_align1(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrb.8 q0, [r0], #3
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strhi16_align1(i8* %y, i8* %x) {
+define ptr @strhi16_align1(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strhi16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1840,15 +1624,13 @@ define i8* @strhi16_align1(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrb.8 q0, [r0], #3
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strhi32_align1(i8* %y, i8* %x) {
+define ptr @strhi32_align1(ptr %y, ptr %x) {
 ; CHECK-LABEL: strhi32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #8
@@ -1863,16 +1645,14 @@ define i8* @strhi32_align1(i8* %y, i8* %x) {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i16>*
-  %3 = trunc <4 x i32> %1 to <4 x i16>
-  store <4 x i16> %3, <4 x i16>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i32>, ptr %x, align 4
+  %1 = trunc <4 x i32> %0 to <4 x i16>
+  store <4 x i16> %1, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strf32_align1(i8* %y, i8* %x) {
+define ptr @strf32_align1(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strf32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1886,15 +1666,13 @@ define i8* @strf32_align1(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrb.8 q0, [r0], #3
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x float>, ptr %x, align 4
+  store <4 x float> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strf16_align1(i8* %y, i8* %x) {
+define ptr @strf16_align1(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strf16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1908,15 +1686,13 @@ define i8* @strf16_align1(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrb.8 q0, [r0], #3
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <8 x half>, ptr %x, align 2
+  store <8 x half> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @strf16_align8(i8* %y, i8* %x) {
+define ptr @strf16_align8(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strf16_align8:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1929,10 +1705,8 @@ define i8* @strf16_align8(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r0], #16
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 16
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 8
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 16
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %y, align 8
+  ret ptr %z
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-ldst-preinc.ll b/llvm/test/CodeGen/Thumb2/mve-ldst-preinc.ll
index 8d64f60280127..11b0115d27d33 100644
--- a/llvm/test/CodeGen/Thumb2/mve-ldst-preinc.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-ldst-preinc.ll
@@ -2,22 +2,20 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE
 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
 
-define i8* @ldrwu32_4(i8* %x, i8* %y) {
+define ptr @ldrwu32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #4]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_3(i8* %x, i8* %y) {
+define ptr @ldrwu32_3(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrwu32_3:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0, #3]!
@@ -31,45 +29,39 @@ define i8* @ldrwu32_3(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_m4(i8* %x, i8* %y) {
+define ptr @ldrwu32_m4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_m4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #-4]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -4
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -4
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_508(i8* %x, i8* %y) {
+define ptr @ldrwu32_508(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #508]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 508
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 508
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_512(i8* %x, i8* %y) {
+define ptr @ldrwu32_512(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r0, r0, #512
@@ -77,30 +69,26 @@ define i8* @ldrwu32_512(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 512
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 512
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_m508(i8* %x, i8* %y) {
+define ptr @ldrwu32_m508(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #-508]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -508
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -508
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_m512(i8* %x, i8* %y) {
+define ptr @ldrwu32_m512(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwu32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r0, r0, #512
@@ -108,32 +96,28 @@ define i8* @ldrwu32_m512(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -512
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -512
+  %0 = load <4 x i32>, ptr %z, align 4
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define i8* @ldrhu32_4(i8* %x, i8* %y) {
+define ptr @ldrhu32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, #4]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_3(i8* %x, i8* %y) {
+define ptr @ldrhu32_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r0, #3
@@ -141,48 +125,42 @@ define i8* @ldrhu32_3(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_2(i8* %x, i8* %y) {
+define ptr @ldrhu32_2(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, #2]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_254(i8* %x, i8* %y) {
+define ptr @ldrhu32_254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, #254]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_256(i8* %x, i8* %y) {
+define ptr @ldrhu32_256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r0, r0, #256
@@ -190,32 +168,28 @@ define i8* @ldrhu32_256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_m254(i8* %x, i8* %y) {
+define ptr @ldrhu32_m254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, #-254]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_m256(i8* %x, i8* %y) {
+define ptr @ldrhu32_m256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r0, r0, #256
@@ -223,33 +197,29 @@ define i8* @ldrhu32_m256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define i8* @ldrhs32_4(i8* %x, i8* %y) {
+define ptr @ldrhs32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, #4]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_3(i8* %x, i8* %y) {
+define ptr @ldrhs32_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r0, #3
@@ -257,48 +227,42 @@ define i8* @ldrhs32_3(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_2(i8* %x, i8* %y) {
+define ptr @ldrhs32_2(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, #2]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_254(i8* %x, i8* %y) {
+define ptr @ldrhs32_254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, #254]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_256(i8* %x, i8* %y) {
+define ptr @ldrhs32_256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r0, r0, #256
@@ -306,32 +270,28 @@ define i8* @ldrhs32_256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_m254(i8* %x, i8* %y) {
+define ptr @ldrhs32_m254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0, #-254]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_m256(i8* %x, i8* %y) {
+define ptr @ldrhs32_m256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhs32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r0, r0, #256
@@ -339,32 +299,28 @@ define i8* @ldrhs32_m256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %0 = load <4 x i16>, ptr %z, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define i8* @ldrhu16_4(i8* %x, i8* %y) {
+define ptr @ldrhu16_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, #4]!
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_3(i8* %x, i8* %y) {
+define ptr @ldrhu16_3(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrhu16_3:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0, #3]!
@@ -378,45 +334,39 @@ define i8* @ldrhu16_3(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_2(i8* %x, i8* %y) {
+define ptr @ldrhu16_2(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, #2]!
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_254(i8* %x, i8* %y) {
+define ptr @ldrhu16_254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, #254]!
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_256(i8* %x, i8* %y) {
+define ptr @ldrhu16_256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r0, r0, #256
@@ -424,30 +374,26 @@ define i8* @ldrhu16_256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_m254(i8* %x, i8* %y) {
+define ptr @ldrhu16_m254(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, #-254]!
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_m256(i8* %x, i8* %y) {
+define ptr @ldrhu16_m256(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhu16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r0, r0, #256
@@ -455,64 +401,56 @@ define i8* @ldrhu16_m256(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %0 = load <8 x i16>, ptr %z, align 2
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
 
-define i8* @ldrbu32_4(i8* %x, i8* %y) {
+define ptr @ldrbu32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, #4]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_3(i8* %x, i8* %y) {
+define ptr @ldrbu32_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, #3]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_127(i8* %x, i8* %y) {
+define ptr @ldrbu32_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, #127]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_128(i8* %x, i8* %y) {
+define ptr @ldrbu32_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r0, #128
@@ -520,32 +458,28 @@ define i8* @ldrbu32_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_m127(i8* %x, i8* %y) {
+define ptr @ldrbu32_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, #-127]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_m128(i8* %x, i8* %y) {
+define ptr @ldrbu32_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    subs r0, #128
@@ -553,65 +487,57 @@ define i8* @ldrbu32_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define i8* @ldrbs32_4(i8* %x, i8* %y) {
+define ptr @ldrbs32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0, #4]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_3(i8* %x, i8* %y) {
+define ptr @ldrbs32_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0, #3]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_127(i8* %x, i8* %y) {
+define ptr @ldrbs32_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0, #127]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_128(i8* %x, i8* %y) {
+define ptr @ldrbs32_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r0, #128
@@ -619,32 +545,28 @@ define i8* @ldrbs32_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_m127(i8* %x, i8* %y) {
+define ptr @ldrbs32_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r0, #-127]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_m128(i8* %x, i8* %y) {
+define ptr @ldrbs32_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    subs r0, #128
@@ -652,65 +574,57 @@ define i8* @ldrbs32_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <4 x i8>, ptr %z, align 1
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define i8* @ldrbu16_4(i8* %x, i8* %y) {
+define ptr @ldrbu16_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0, #4]!
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_3(i8* %x, i8* %y) {
+define ptr @ldrbu16_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0, #3]!
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_127(i8* %x, i8* %y) {
+define ptr @ldrbu16_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0, #127]!
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_128(i8* %x, i8* %y) {
+define ptr @ldrbu16_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r0, #128
@@ -718,32 +632,28 @@ define i8* @ldrbu16_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_m127(i8* %x, i8* %y) {
+define ptr @ldrbu16_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0, #-127]!
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_m128(i8* %x, i8* %y) {
+define ptr @ldrbu16_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    subs r0, #128
@@ -751,65 +661,57 @@ define i8* @ldrbu16_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
 
-define i8* @ldrbs16_4(i8* %x, i8* %y) {
+define ptr @ldrbs16_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0, #4]!
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_3(i8* %x, i8* %y) {
+define ptr @ldrbs16_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0, #3]!
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_127(i8* %x, i8* %y) {
+define ptr @ldrbs16_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0, #127]!
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_128(i8* %x, i8* %y) {
+define ptr @ldrbs16_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r0, #128
@@ -817,32 +719,28 @@ define i8* @ldrbs16_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_m127(i8* %x, i8* %y) {
+define ptr @ldrbs16_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r0, #-127]!
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_m128(i8* %x, i8* %y) {
+define ptr @ldrbs16_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbs16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    subs r0, #128
@@ -850,62 +748,54 @@ define i8* @ldrbs16_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <8 x i8>, ptr %z, align 1
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
 
-define i8* @ldrbu8_4(i8* %x, i8* %y) {
+define ptr @ldrbu8_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0, #4]!
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <16 x i8>, ptr %z, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_3(i8* %x, i8* %y) {
+define ptr @ldrbu8_3(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0, #3]!
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <16 x i8>, ptr %z, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_127(i8* %x, i8* %y) {
+define ptr @ldrbu8_127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0, #127]!
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %0 = load <16 x i8>, ptr %z, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_128(i8* %x, i8* %y) {
+define ptr @ldrbu8_128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r0, #128
@@ -913,30 +803,26 @@ define i8* @ldrbu8_128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %0 = load <16 x i8>, ptr %z, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_m127(i8* %x, i8* %y) {
+define ptr @ldrbu8_m127(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0, #-127]!
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %0 = load <16 x i8>, ptr %z, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_m128(i8* %x, i8* %y) {
+define ptr @ldrbu8_m128(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrbu8_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    subs r0, #128
@@ -944,46 +830,40 @@ define i8* @ldrbu8_m128(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %0 = load <16 x i8>, ptr %z, align 1
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
 
-define i8* @ldrwf32_4(i8* %x, i8* %y) {
+define ptr @ldrwf32_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwf32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #4]!
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <4 x float>, ptr %z, align 4
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf16_4(i8* %x, i8* %y) {
+define ptr @ldrwf16_4(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrwf16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0, #4]!
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x half>, ptr %z, align 2
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrwi32_align1(i8* %x, i8* %y) {
+define ptr @ldrwi32_align1(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrwi32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0, #3]!
@@ -998,15 +878,13 @@ define i8* @ldrwi32_align1(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i32>, ptr %z, align 1
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhi16_align1(i8* %x, i8* %y) {
+define ptr @ldrhi16_align1(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrhi16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0, #3]!
@@ -1021,15 +899,13 @@ define i8* @ldrhi16_align1(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x i16>, ptr %z, align 1
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhi32_align1(i8* %x, i8* %y) {
+define ptr @ldrhi32_align1(ptr %x, ptr %y) {
 ; CHECK-LABEL: ldrhi32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #8
@@ -1044,16 +920,14 @@ define i8* @ldrhi32_align1(i8* %x, i8* %y) {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i32>*
-  %3 = sext <4 x i16> %1 to <4 x i32>
-  store <4 x i32> %3, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x i16>, ptr %z, align 1
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrf32_align1(i8* %x, i8* %y) {
+define ptr @ldrf32_align1(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrf32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0, #3]!
@@ -1068,15 +942,13 @@ define i8* @ldrf32_align1(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 1
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <4 x float>, ptr %z, align 1
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrf16_align1(i8* %x, i8* %y) {
+define ptr @ldrf16_align1(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrf16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0, #3]!
@@ -1091,15 +963,13 @@ define i8* @ldrf16_align1(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 1
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %0 = load <8 x half>, ptr %z, align 1
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrh16_align8(i8* %x, i8* %y) {
+define ptr @ldrh16_align8(ptr %x, ptr %y) {
 ; CHECK-LE-LABEL: ldrh16_align8:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r0, #4]!
@@ -1112,19 +982,17 @@ define i8* @ldrh16_align8(i8* %x, i8* %y) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 8
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %0 = load <8 x i16>, ptr %z, align 8
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
 
 
 
 
-define i8* @strw32_4(i8* %y, i8* %x) {
+define ptr @strw32_4(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strw32_4:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1137,15 +1005,13 @@ define i8* @strw32_4(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r0, #4]!
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %z
 }
 
-define i8* @strw32_3(i8* %y, i8* %x) {
+define ptr @strw32_3(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strw32_3:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1159,15 +1025,13 @@ define i8* @strw32_3(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %z
 }
 
-define i8* @strw32_m4(i8* %y, i8* %x) {
+define ptr @strw32_m4(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strw32_m4:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1180,30 +1044,26 @@ define i8* @strw32_m4(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r0, #-4]!
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -4
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %z
 }
 
-define i8* @strw32_508(i8* %y, i8* %x) {
+define ptr @strw32_508(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, #508]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 508
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %z
 }
 
-define i8* @strw32_512(i8* %y, i8* %x) {
+define ptr @strw32_512(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r0, r0, #512
@@ -1211,30 +1071,26 @@ define i8* @strw32_512(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 512
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %z
 }
 
-define i8* @strw32_m508(i8* %y, i8* %x) {
+define ptr @strw32_m508(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, #-508]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -508
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %z
 }
 
-define i8* @strw32_m512(i8* %y, i8* %x) {
+define ptr @strw32_m512(ptr %y, ptr %x) {
 ; CHECK-LABEL: strw32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r0, r0, #512
@@ -1242,31 +1098,27 @@ define i8* @strw32_m512(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -512
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 4
+  ret ptr %z
 }
 
 
-define i8* @strh32_4(i8* %y, i8* %x) {
+define ptr @strh32_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, #4]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
-define i8* @strh32_3(i8* %y, i8* %x) {
+define ptr @strh32_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r0, #3
@@ -1274,45 +1126,39 @@ define i8* @strh32_3(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrh.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
-define i8* @strh32_2(i8* %y, i8* %x) {
+define ptr @strh32_2(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, #2]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
-define i8* @strh32_254(i8* %y, i8* %x) {
+define ptr @strh32_254(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, #254]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
-define i8* @strh32_256(i8* %y, i8* %x) {
+define ptr @strh32_256(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r0, r0, #256
@@ -1320,30 +1166,26 @@ define i8* @strh32_256(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrh.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
-define i8* @strh32_m254(i8* %y, i8* %x) {
+define ptr @strh32_m254(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, #-254]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
-define i8* @strh32_m256(i8* %y, i8* %x) {
+define ptr @strh32_m256(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r0, r0, #256
@@ -1351,16 +1193,14 @@ define i8* @strh32_m256(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrh.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  store <4 x i16> %1, <4 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %0 = load <4 x i16>, ptr %x, align 2
+  store <4 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
 
-define i8* @strh16_4(i8* %y, i8* %x) {
+define ptr @strh16_4(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strh16_4:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1373,15 +1213,13 @@ define i8* @strh16_4(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r0, #4]!
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
-define i8* @strh16_3(i8* %y, i8* %x) {
+define ptr @strh16_3(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strh16_3:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1395,15 +1233,13 @@ define i8* @strh16_3(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
-define i8* @strh16_2(i8* %y, i8* %x) {
+define ptr @strh16_2(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strh16_2:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1416,30 +1252,26 @@ define i8* @strh16_2(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r0, #2]!
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
-define i8* @strh16_254(i8* %y, i8* %x) {
+define ptr @strh16_254(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, #254]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
-define i8* @strh16_256(i8* %y, i8* %x) {
+define ptr @strh16_256(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    add.w r0, r0, #256
@@ -1447,30 +1279,26 @@ define i8* @strh16_256(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
-define i8* @strh16_m254(i8* %y, i8* %x) {
+define ptr @strh16_m254(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, #-254]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
-define i8* @strh16_m256(i8* %y, i8* %x) {
+define ptr @strh16_m256(ptr %y, ptr %x) {
 ; CHECK-LABEL: strh16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    sub.w r0, r0, #256
@@ -1478,61 +1306,53 @@ define i8* @strh16_m256(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 2
+  ret ptr %z
 }
 
 
-define i8* @strb32_4(i8* %y, i8* %x) {
+define ptr @strb32_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0, #4]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb32_3(i8* %y, i8* %x) {
+define ptr @strb32_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0, #3]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb32_127(i8* %y, i8* %x) {
+define ptr @strb32_127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0, #127]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb32_128(i8* %y, i8* %x) {
+define ptr @strb32_128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r0, #128
@@ -1540,30 +1360,26 @@ define i8* @strb32_128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrb.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb32_m127(i8* %y, i8* %x) {
+define ptr @strb32_m127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0, #-127]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb32_m128(i8* %y, i8* %x) {
+define ptr @strb32_m128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    subs r0, #128
@@ -1571,61 +1387,53 @@ define i8* @strb32_m128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrb.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  store <4 x i8> %1, <4 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %0 = load <4 x i8>, ptr %x, align 1
+  store <4 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
 
-define i8* @strb16_4(i8* %y, i8* %x) {
+define ptr @strb16_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0, #4]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb16_3(i8* %y, i8* %x) {
+define ptr @strb16_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0, #3]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb16_127(i8* %y, i8* %x) {
+define ptr @strb16_127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0, #127]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb16_128(i8* %y, i8* %x) {
+define ptr @strb16_128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r0, #128
@@ -1633,30 +1441,26 @@ define i8* @strb16_128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrb.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb16_m127(i8* %y, i8* %x) {
+define ptr @strb16_m127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0, #-127]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb16_m128(i8* %y, i8* %x) {
+define ptr @strb16_m128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    subs r0, #128
@@ -1664,61 +1468,53 @@ define i8* @strb16_m128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrb.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  store <8 x i8> %1, <8 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %0 = load <8 x i8>, ptr %x, align 1
+  store <8 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
 
-define i8* @strb8_4(i8* %y, i8* %x) {
+define ptr @strb8_4(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
 ; CHECK-NEXT:    vstrb.8 q0, [r0, #4]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb8_3(i8* %y, i8* %x) {
+define ptr @strb8_3(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
 ; CHECK-NEXT:    vstrb.8 q0, [r0, #3]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb8_127(i8* %y, i8* %x) {
+define ptr @strb8_127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
 ; CHECK-NEXT:    vstrb.8 q0, [r0, #127]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb8_128(i8* %y, i8* %x) {
+define ptr @strb8_128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adds r0, #128
@@ -1726,30 +1522,26 @@ define i8* @strb8_128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrb.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb8_m127(i8* %y, i8* %x) {
+define ptr @strb8_m127(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
 ; CHECK-NEXT:    vstrb.8 q0, [r0, #-127]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strb8_m128(i8* %y, i8* %x) {
+define ptr @strb8_m128(ptr %y, ptr %x) {
 ; CHECK-LABEL: strb8_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    subs r0, #128
@@ -1757,16 +1549,14 @@ define i8* @strb8_m128(i8* %y, i8* %x) {
 ; CHECK-NEXT:    vstrb.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %0 = load <16 x i8>, ptr %x, align 1
+  store <16 x i8> %0, ptr %z, align 1
+  ret ptr %z
 }
 
 
-define i8* @strf32_4(i8* %y, i8* %x) {
+define ptr @strf32_4(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strf32_4:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1779,15 +1569,13 @@ define i8* @strf32_4(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r0, #4]!
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x float>, ptr %x, align 4
+  store <4 x float> %0, ptr %z, align 4
+  ret ptr %z
 }
 
-define i8* @strf16_4(i8* %y, i8* %x) {
+define ptr @strf16_4(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strf16_4:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1800,15 +1588,13 @@ define i8* @strf16_4(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r0, #4]!
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <8 x half>, ptr %x, align 2
+  store <8 x half> %0, ptr %z, align 2
+  ret ptr %z
 }
 
-define i8* @strwi32_align1(i8* %y, i8* %x) {
+define ptr @strwi32_align1(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strwi32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1823,15 +1609,13 @@ define i8* @strwi32_align1(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    adds r0, #3
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i32>, ptr %x, align 4
+  store <4 x i32> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strhi16_align1(i8* %y, i8* %x) {
+define ptr @strhi16_align1(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strhi16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1846,15 +1630,13 @@ define i8* @strhi16_align1(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    adds r0, #3
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strhi32_align1(i8* %y, i8* %x) {
+define ptr @strhi32_align1(ptr %y, ptr %x) {
 ; CHECK-LABEL: strhi32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #8
@@ -1868,16 +1650,14 @@ define i8* @strhi32_align1(i8* %y, i8* %x) {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i16>*
-  %3 = trunc <4 x i32> %1 to <4 x i16>
-  store <4 x i16> %3, <4 x i16>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x i32>, ptr %x, align 4
+  %1 = trunc <4 x i32> %0 to <4 x i16>
+  store <4 x i16> %1, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strf32_align1(i8* %y, i8* %x) {
+define ptr @strf32_align1(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strf32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
@@ -1892,15 +1672,13 @@ define i8* @strf32_align1(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    adds r0, #3
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <4 x float>, ptr %x, align 4
+  store <4 x float> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strf16_align1(i8* %y, i8* %x) {
+define ptr @strf16_align1(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strf16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1915,15 +1693,13 @@ define i8* @strf16_align1(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    adds r0, #3
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 1
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %0 = load <8 x half>, ptr %x, align 2
+  store <8 x half> %0, ptr %z, align 1
+  ret ptr %z
 }
 
-define i8* @strf16_align8(i8* %y, i8* %x) {
+define ptr @strf16_align8(ptr %y, ptr %x) {
 ; CHECK-LE-LABEL: strf16_align8:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r1]
@@ -1936,10 +1712,8 @@ define i8* @strf16_align8(i8* %y, i8* %x) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r0, #16]!
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 16
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 8
-  ret i8* %z
+  %z = getelementptr inbounds i8, ptr %y, i32 16
+  %0 = load <8 x i16>, ptr %x, align 2
+  store <8 x i16> %0, ptr %z, align 8
+  ret ptr %z
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-ldst-regimm.ll b/llvm/test/CodeGen/Thumb2/mve-ldst-regimm.ll
index 43564017f35a2..2a868830e92c7 100644
--- a/llvm/test/CodeGen/Thumb2/mve-ldst-regimm.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-ldst-regimm.ll
@@ -7,152 +7,132 @@
 %struct.s_float16_t = type { [8 x half], [8 x half] }
 %struct.s_float32_t = type { [4 x float], [4 x float] }
 
-define hidden void @fwd_int8_t(%struct.s_int8_t* noalias %v) local_unnamed_addr #0 {
+define hidden void @fwd_int8_t(ptr noalias %v) local_unnamed_addr #0 {
 ; CHECK-LABEL: fwd_int8_t:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
 ; CHECK-NEXT:    vstrb.8 q0, [r0, #16]
 ; CHECK-NEXT:    bx lr
 entry:
-  %arrayidx3 = getelementptr inbounds %struct.s_int8_t, %struct.s_int8_t* %v, i32 0, i32 1, i32 0
-  %0 = bitcast %struct.s_int8_t* %v to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %arrayidx3 to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
+  %arrayidx3 = getelementptr inbounds %struct.s_int8_t, ptr %v, i32 0, i32 1, i32 0
+  %0 = load <16 x i8>, ptr %v, align 1
+  store <16 x i8> %0, ptr %arrayidx3, align 1
   ret void
 }
 
-define hidden void @fwd_int16_t(%struct.s_int16_t* noalias nocapture %v) local_unnamed_addr #0 {
+define hidden void @fwd_int16_t(ptr noalias nocapture %v) local_unnamed_addr #0 {
 ; CHECK-LABEL: fwd_int16_t:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, #16]
 ; CHECK-NEXT:    bx lr
 entry:
-  %arrayidx3 = getelementptr inbounds %struct.s_int16_t, %struct.s_int16_t* %v, i32 0, i32 1, i32 0
-  %0 = bitcast %struct.s_int16_t* %v to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i16* %arrayidx3 to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
+  %arrayidx3 = getelementptr inbounds %struct.s_int16_t, ptr %v, i32 0, i32 1, i32 0
+  %0 = load <8 x i16>, ptr %v, align 2
+  store <8 x i16> %0, ptr %arrayidx3, align 2
   ret void
 }
 
-define hidden void @fwd_int32_t(%struct.s_int32_t* noalias nocapture %v) local_unnamed_addr #0 {
+define hidden void @fwd_int32_t(ptr noalias nocapture %v) local_unnamed_addr #0 {
 ; CHECK-LABEL: fwd_int32_t:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, #16]
 ; CHECK-NEXT:    bx lr
 entry:
-  %arrayidx3 = getelementptr inbounds %struct.s_int32_t, %struct.s_int32_t* %v, i32 0, i32 1, i32 0
-  %0 = bitcast %struct.s_int32_t* %v to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i32* %arrayidx3 to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
+  %arrayidx3 = getelementptr inbounds %struct.s_int32_t, ptr %v, i32 0, i32 1, i32 0
+  %0 = load <4 x i32>, ptr %v, align 4
+  store <4 x i32> %0, ptr %arrayidx3, align 4
   ret void
 }
 
-define hidden void @fwd_float16_t(%struct.s_float16_t* noalias nocapture %v) local_unnamed_addr #0 {
+define hidden void @fwd_float16_t(ptr noalias nocapture %v) local_unnamed_addr #0 {
 ; CHECK-LABEL: fwd_float16_t:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0], #16
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %arrayidx3 = getelementptr inbounds %struct.s_float16_t, %struct.s_float16_t* %v, i32 0, i32 1, i32 0
-  %0 = bitcast %struct.s_float16_t* %v to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast half* %arrayidx3 to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
+  %arrayidx3 = getelementptr inbounds %struct.s_float16_t, ptr %v, i32 0, i32 1, i32 0
+  %0 = load <8 x half>, ptr %v, align 2
+  store <8 x half> %0, ptr %arrayidx3, align 2
   ret void
 }
 
-define hidden void @fwd_float32_t(%struct.s_float32_t* noalias nocapture %v) local_unnamed_addr #0 {
+define hidden void @fwd_float32_t(ptr noalias nocapture %v) local_unnamed_addr #0 {
 ; CHECK-LABEL: fwd_float32_t:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, #16]
 ; CHECK-NEXT:    bx lr
 entry:
-  %d = getelementptr inbounds %struct.s_float32_t, %struct.s_float32_t* %v, i32 0, i32 1
-  %0 = bitcast %struct.s_float32_t* %v to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast [4 x float]* %d to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
+  %d = getelementptr inbounds %struct.s_float32_t, ptr %v, i32 0, i32 1
+  %0 = load <4 x i32>, ptr %v, align 4
+  store <4 x i32> %0, ptr %d, align 4
   ret void
 }
 
-define hidden void @bwd_int8_t(%struct.s_int8_t* noalias %v) local_unnamed_addr #0 {
+define hidden void @bwd_int8_t(ptr noalias %v) local_unnamed_addr #0 {
 ; CHECK-LABEL: bwd_int8_t:
 ; CHECK:       @ %bb.0: @ %for.end
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
 ; CHECK-NEXT:    vstrb.8 q0, [r0, #-16]
 ; CHECK-NEXT:    bx lr
 for.end:
-  %0 = bitcast %struct.s_int8_t* %v to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %arrayidx3 = getelementptr inbounds %struct.s_int8_t, %struct.s_int8_t* %v, i32 -1, i32 1, i32 0
-  %2 = bitcast i8* %arrayidx3 to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
+  %0 = load <16 x i8>, ptr %v, align 1
+  %arrayidx3 = getelementptr inbounds %struct.s_int8_t, ptr %v, i32 -1, i32 1, i32 0
+  store <16 x i8> %0, ptr %arrayidx3, align 1
   ret void
 }
 
-define hidden void @bwd_int16_t(%struct.s_int16_t* noalias nocapture %v) local_unnamed_addr #0 {
+define hidden void @bwd_int16_t(ptr noalias nocapture %v) local_unnamed_addr #0 {
 ; CHECK-LABEL: bwd_int16_t:
 ; CHECK:       @ %bb.0: @ %for.end
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, #-16]
 ; CHECK-NEXT:    bx lr
 for.end:
-  %0 = bitcast %struct.s_int16_t* %v to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %arrayidx3 = getelementptr inbounds %struct.s_int16_t, %struct.s_int16_t* %v, i32 -1, i32 1, i32 0
-  %2 = bitcast i16* %arrayidx3 to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
+  %0 = load <8 x i16>, ptr %v, align 2
+  %arrayidx3 = getelementptr inbounds %struct.s_int16_t, ptr %v, i32 -1, i32 1, i32 0
+  store <8 x i16> %0, ptr %arrayidx3, align 2
   ret void
 }
 
-define hidden void @bwd_int32_t(%struct.s_int32_t* noalias nocapture %v) local_unnamed_addr #0 {
+define hidden void @bwd_int32_t(ptr noalias nocapture %v) local_unnamed_addr #0 {
 ; CHECK-LABEL: bwd_int32_t:
 ; CHECK:       @ %bb.0: @ %for.end
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, #-16]
 ; CHECK-NEXT:    bx lr
 for.end:
-  %0 = bitcast %struct.s_int32_t* %v to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %arrayidx3 = getelementptr inbounds %struct.s_int32_t, %struct.s_int32_t* %v, i32 -1, i32 1, i32 0
-  %2 = bitcast i32* %arrayidx3 to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
+  %0 = load <4 x i32>, ptr %v, align 4
+  %arrayidx3 = getelementptr inbounds %struct.s_int32_t, ptr %v, i32 -1, i32 1, i32 0
+  store <4 x i32> %0, ptr %arrayidx3, align 4
   ret void
 }
 
-define hidden void @bwd_float16_t(%struct.s_float16_t* noalias nocapture %v) local_unnamed_addr #0 {
+define hidden void @bwd_float16_t(ptr noalias nocapture %v) local_unnamed_addr #0 {
 ; CHECK-LABEL: bwd_float16_t:
 ; CHECK:       @ %bb.0: @ %for.end
 ; CHECK-NEXT:    vldrh.u16 q0, [r0], #-16
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 for.end:
-  %0 = bitcast %struct.s_float16_t* %v to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %arrayidx3 = getelementptr inbounds %struct.s_float16_t, %struct.s_float16_t* %v, i32 -1, i32 1, i32 0
-  %2 = bitcast half* %arrayidx3 to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
+  %0 = load <8 x half>, ptr %v, align 2
+  %arrayidx3 = getelementptr inbounds %struct.s_float16_t, ptr %v, i32 -1, i32 1, i32 0
+  store <8 x half> %0, ptr %arrayidx3, align 2
   ret void
 }
 
-define hidden void @bwd_float32_t(%struct.s_float32_t* noalias nocapture %v) local_unnamed_addr #0 {
+define hidden void @bwd_float32_t(ptr noalias nocapture %v) local_unnamed_addr #0 {
 ; CHECK-LABEL: bwd_float32_t:
 ; CHECK:       @ %bb.0: @ %for.end
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, #-16]
 ; CHECK-NEXT:    bx lr
 for.end:
-  %0 = bitcast %struct.s_float32_t* %v to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %d = getelementptr inbounds %struct.s_float32_t, %struct.s_float32_t* %v, i32 -1, i32 1
-  %2 = bitcast [4 x float]* %d to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
+  %0 = load <4 x i32>, ptr %v, align 4
+  %d = getelementptr inbounds %struct.s_float32_t, ptr %v, i32 -1, i32 1
+  store <4 x i32> %0, ptr %d, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-loadstore.ll b/llvm/test/CodeGen/Thumb2/mve-loadstore.ll
index 70b1db7ac50af..5091a5f336b08 100644
--- a/llvm/test/CodeGen/Thumb2/mve-loadstore.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-loadstore.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-LE
 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-BE
 
-define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a4(<4 x i32>* %vp) {
+define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a4(ptr %vp) {
 ; CHECK-LE-LABEL: load_4xi32_a4:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r0]
@@ -16,12 +16,12 @@ define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a4(<4 x i32>* %vp) {
 ; CHECK-BE-NEXT:    vrev64.32 q0, q1
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %vp, align 4
+  %0 = load <4 x i32>, ptr %vp, align 4
   %1 = lshr <4 x i32> %0, <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %1
 }
 
-define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a2(<4 x i32>* %vp) {
+define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a2(ptr %vp) {
 ; CHECK-LE-LABEL: load_4xi32_a2:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q0, [r0]
@@ -36,12 +36,12 @@ define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a2(<4 x i32>* %vp) {
 ; CHECK-BE-NEXT:    vrev64.32 q0, q1
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %vp, align 2
+  %0 = load <4 x i32>, ptr %vp, align 2
   %1 = lshr <4 x i32> %0, <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %1
 }
 
-define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a1(<4 x i32>* %vp) {
+define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a1(ptr %vp) {
 ; CHECK-LE-LABEL: load_4xi32_a1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrb.u8 q0, [r0]
@@ -56,12 +56,12 @@ define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a1(<4 x i32>* %vp) {
 ; CHECK-BE-NEXT:    vrev64.32 q0, q1
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %vp, align 1
+  %0 = load <4 x i32>, ptr %vp, align 1
   %1 = lshr <4 x i32> %0, <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %1
 }
 
-define arm_aapcs_vfpcc void @store_4xi32_a4(<4 x i32>* %vp, <4 x i32> %val) {
+define arm_aapcs_vfpcc void @store_4xi32_a4(ptr %vp, <4 x i32> %val) {
 ; CHECK-LE-LABEL: store_4xi32_a4:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vshr.u32 q0, q0, #1
@@ -76,11 +76,11 @@ define arm_aapcs_vfpcc void @store_4xi32_a4(<4 x i32>* %vp, <4 x i32> %val) {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %0 = lshr <4 x i32> %val, <i32 1, i32 1, i32 1, i32 1>
-  store <4 x i32> %0, <4 x i32>* %vp, align 4
+  store <4 x i32> %0, ptr %vp, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @store_4xi32_a2(<4 x i32>* %vp, <4 x i32> %val) {
+define arm_aapcs_vfpcc void @store_4xi32_a2(ptr %vp, <4 x i32> %val) {
 ; CHECK-LE-LABEL: store_4xi32_a2:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vshr.u32 q0, q0, #1
@@ -96,11 +96,11 @@ define arm_aapcs_vfpcc void @store_4xi32_a2(<4 x i32>* %vp, <4 x i32> %val) {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %0 = lshr <4 x i32> %val, <i32 1, i32 1, i32 1, i32 1>
-  store <4 x i32> %0, <4 x i32>* %vp, align 2
+  store <4 x i32> %0, ptr %vp, align 2
   ret void
 }
 
-define arm_aapcs_vfpcc void @store_4xi32_a1(<4 x i32>* %vp, <4 x i32> %val) {
+define arm_aapcs_vfpcc void @store_4xi32_a1(ptr %vp, <4 x i32> %val) {
 ; CHECK-LE-LABEL: store_4xi32_a1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vshr.u32 q0, q0, #1
@@ -116,11 +116,11 @@ define arm_aapcs_vfpcc void @store_4xi32_a1(<4 x i32>* %vp, <4 x i32> %val) {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %0 = lshr <4 x i32> %val, <i32 1, i32 1, i32 1, i32 1>
-  store <4 x i32> %0, <4 x i32>* %vp, align 1
+  store <4 x i32> %0, ptr %vp, align 1
   ret void
 }
 
-define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a4_offset_pos(i32* %ip) {
+define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a4_offset_pos(ptr %ip) {
 ; CHECK-LE-LABEL: load_4xi32_a4_offset_pos:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r0, #508]
@@ -133,13 +133,12 @@ define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a4_offset_pos(i32* %ip) {
 ; CHECK-BE-NEXT:    vrev64.8 q0, q1
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %ipoffset = getelementptr inbounds i32, i32* %ip, i32 127
-  %vp = bitcast i32* %ipoffset to <4 x i32>*
-  %0 = load <4 x i32>, <4 x i32>* %vp, align 4
+  %ipoffset = getelementptr inbounds i32, ptr %ip, i32 127
+  %0 = load <4 x i32>, ptr %ipoffset, align 4
   ret <4 x i32> %0
 }
 
-define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a4_offset_neg(i32* %ip) {
+define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a4_offset_neg(ptr %ip) {
 ; CHECK-LE-LABEL: load_4xi32_a4_offset_neg:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrw.u32 q0, [r0, #-508]
@@ -152,9 +151,8 @@ define arm_aapcs_vfpcc <4 x i32> @load_4xi32_a4_offset_neg(i32* %ip) {
 ; CHECK-BE-NEXT:    vrev64.8 q0, q1
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %ipoffset = getelementptr inbounds i32, i32* %ip, i32 -127
-  %vp = bitcast i32* %ipoffset to <4 x i32>*
-  %0 = load <4 x i32>, <4 x i32>* %vp, align 4
+  %ipoffset = getelementptr inbounds i32, ptr %ip, i32 -127
+  %0 = load <4 x i32>, ptr %ipoffset, align 4
   ret <4 x i32> %0
 }
 
@@ -189,16 +187,12 @@ define arm_aapcs_vfpcc <4 x i32> @loadstore_4xi32_stack_off16() {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = alloca [1 x [5 x [2 x i32]]], align 4
-  %0 = bitcast [1 x [5 x [2 x i32]]]* %c to i8*
-  %arrayidx5 = getelementptr inbounds [1 x [5 x [2 x i32]]], [1 x [5 x [2 x i32]]]* %c, i32 0, i32 0, i32 0, i32 0
-  %1 = bitcast [1 x [5 x [2 x i32]]]* %c to <4 x i32>*
-  store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32>* %1, align 4
-  %arrayidx5.2 = getelementptr inbounds [1 x [5 x [2 x i32]]], [1 x [5 x [2 x i32]]]* %c, i32 0, i32 0, i32 2, i32 0
-  %2 = bitcast i32* %arrayidx5.2 to <4 x i32>*
-  store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32>* %2, align 4
-  store i32 3, i32* %arrayidx5.2, align 4
-  %3 = load <4 x i32>, <4 x i32>* %2, align 4
-  ret <4 x i32> %3
+  store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, ptr %c, align 4
+  %arrayidx5.2 = getelementptr inbounds [1 x [5 x [2 x i32]]], ptr %c, i32 0, i32 0, i32 2, i32 0
+  store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, ptr %arrayidx5.2, align 4
+  store i32 3, ptr %arrayidx5.2, align 4
+  %0 = load <4 x i32>, ptr %arrayidx5.2, align 4
+  ret <4 x i32> %0
 }
 
 define arm_aapcs_vfpcc <8 x i16> @loadstore_8xi16_stack_off16() {
@@ -232,16 +226,12 @@ define arm_aapcs_vfpcc <8 x i16> @loadstore_8xi16_stack_off16() {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = alloca [1 x [10 x [2 x i16]]], align 2
-  %0 = bitcast [1 x [10 x [2 x i16]]]* %c to i8*
-  %arrayidx5 = getelementptr inbounds [1 x [10 x [2 x i16]]], [1 x [10 x [2 x i16]]]* %c, i32 0, i32 0, i32 0, i32 0
-  %1 = bitcast [1 x [10 x [2 x i16]]]* %c to <8 x i16>*
-  store <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16>* %1, align 2
-  %arrayidx5.2 = getelementptr inbounds [1 x [10 x [2 x i16]]], [1 x [10 x [2 x i16]]]* %c, i32 0, i32 0, i32 4, i32 0
-  %2 = bitcast i16* %arrayidx5.2 to <8 x i16>*
-  store <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16>* %2, align 2
-  store i16 3, i16* %arrayidx5.2, align 2
-  %3 = load <8 x i16>, <8 x i16>* %2, align 2
-  ret <8 x i16> %3
+  store <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, ptr %c, align 2
+  %arrayidx5.2 = getelementptr inbounds [1 x [10 x [2 x i16]]], ptr %c, i32 0, i32 0, i32 4, i32 0
+  store <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, ptr %arrayidx5.2, align 2
+  store i16 3, ptr %arrayidx5.2, align 2
+  %0 = load <8 x i16>, ptr %arrayidx5.2, align 2
+  ret <8 x i16> %0
 }
 
 define arm_aapcs_vfpcc <16 x i8> @loadstore_16xi8_stack_off16() {
@@ -275,14 +265,10 @@ define arm_aapcs_vfpcc <16 x i8> @loadstore_16xi8_stack_off16() {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = alloca [1 x [20 x [2 x i8]]], align 1
-  %0 = bitcast [1 x [20 x [2 x i8]]]* %c to i8*
-  %arrayidx5 = getelementptr inbounds [1 x [20 x [2 x i8]]], [1 x [20 x [2 x i8]]]* %c, i32 0, i32 0, i32 0, i32 0
-  %1 = bitcast [1 x [20 x [2 x i8]]]* %c to <16 x i8>*
-  store <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8>* %1, align 1
-  %arrayidx5.2 = getelementptr inbounds [1 x [20 x [2 x i8]]], [1 x [20 x [2 x i8]]]* %c, i32 0, i32 0, i32 8, i32 0
-  %2 = bitcast i8* %arrayidx5.2 to <16 x i8>*
-  store <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8>* %2, align 1
-  store i8 3, i8* %arrayidx5.2, align 1
-  %3 = load <16 x i8>, <16 x i8>* %2, align 1
-  ret <16 x i8> %3
+  store <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, ptr %c, align 1
+  %arrayidx5.2 = getelementptr inbounds [1 x [20 x [2 x i8]]], ptr %c, i32 0, i32 0, i32 8, i32 0
+  store <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, ptr %arrayidx5.2, align 1
+  store i8 3, ptr %arrayidx5.2, align 1
+  %0 = load <16 x i8>, ptr %arrayidx5.2, align 1
+  ret <16 x i8> %0
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-ldst-offset.ll b/llvm/test/CodeGen/Thumb2/mve-masked-ldst-offset.ll
index 5405d4b7427c6..1934ef155895f 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-ldst-offset.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-ldst-offset.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
 
-define i8* @ldrwu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -11,17 +11,15 @@ define i8* @ldrwu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -31,17 +29,15 @@ define i8* @ldrwu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -51,17 +47,15 @@ define i8* @ldrwu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwu32_508(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_508(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -70,17 +64,15 @@ define i8* @ldrwu32_508(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 508
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwu32_512(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_512(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -90,17 +82,15 @@ define i8* @ldrwu32_512(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 512
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwu32_m508(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_m508(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -109,17 +99,15 @@ define i8* @ldrwu32_m508(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -508
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwu32_m512(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_m512(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -129,17 +117,15 @@ define i8* @ldrwu32_m512(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -512
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -148,18 +134,16 @@ define i8* @ldrhu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -169,18 +153,16 @@ define i8* @ldrhu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -189,18 +171,16 @@ define i8* @ldrhu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu32_254(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -209,18 +189,16 @@ define i8* @ldrhu32_254(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu32_256(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -230,18 +208,16 @@ define i8* @ldrhu32_256(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu32_m254(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_m254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -250,18 +226,16 @@ define i8* @ldrhu32_m254(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu32_m256(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_m256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -271,18 +245,16 @@ define i8* @ldrhu32_m256(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhs32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -291,18 +263,16 @@ define i8* @ldrhs32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhs32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -312,18 +282,16 @@ define i8* @ldrhs32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhs32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -332,18 +300,16 @@ define i8* @ldrhs32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhs32_254(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -352,18 +318,16 @@ define i8* @ldrhs32_254(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhs32_256(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -373,18 +337,16 @@ define i8* @ldrhs32_256(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhs32_m254(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_m254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -393,18 +355,16 @@ define i8* @ldrhs32_m254(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhs32_m256(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_m256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -414,18 +374,16 @@ define i8* @ldrhs32_m256(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhu16_4(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -434,17 +392,15 @@ define i8* @ldrhu16_4(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v4i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhu16_3(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -454,17 +410,15 @@ define i8* @ldrhu16_3(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v4i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhu16_2(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -473,17 +427,15 @@ define i8* @ldrhu16_2(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v4i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhu16_254(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -492,17 +444,15 @@ define i8* @ldrhu16_254(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v4i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhu16_256(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -512,17 +462,15 @@ define i8* @ldrhu16_256(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v4i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhu16_m254(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_m254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -531,17 +479,15 @@ define i8* @ldrhu16_m254(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v4i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhu16_m256(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_m256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -551,17 +497,15 @@ define i8* @ldrhu16_m256(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v4i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -570,18 +514,16 @@ define i8* @ldrbu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -590,18 +532,16 @@ define i8* @ldrbu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -610,18 +550,16 @@ define i8* @ldrbu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbu32_127(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -630,18 +568,16 @@ define i8* @ldrbu32_127(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbu32_128(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -651,18 +587,16 @@ define i8* @ldrbu32_128(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbu32_m127(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -671,18 +605,16 @@ define i8* @ldrbu32_m127(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbu32_m128(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -692,18 +624,16 @@ define i8* @ldrbu32_m128(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbs32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -712,18 +642,16 @@ define i8* @ldrbs32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbs32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -732,18 +660,16 @@ define i8* @ldrbs32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbs32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -752,18 +678,16 @@ define i8* @ldrbs32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbs32_127(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -772,18 +696,16 @@ define i8* @ldrbs32_127(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbs32_128(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -793,18 +715,16 @@ define i8* @ldrbs32_128(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbs32_m127(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -813,18 +733,16 @@ define i8* @ldrbs32_m127(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbs32_m128(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -834,18 +752,16 @@ define i8* @ldrbs32_m128(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %x
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrbu16_4(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -854,18 +770,16 @@ define i8* @ldrbu16_4(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbu16_3(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -874,18 +788,16 @@ define i8* @ldrbu16_3(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbu16_2(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -894,18 +806,16 @@ define i8* @ldrbu16_2(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbu16_127(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -914,18 +824,16 @@ define i8* @ldrbu16_127(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbu16_128(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -935,18 +843,16 @@ define i8* @ldrbu16_128(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbu16_m127(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -955,18 +861,16 @@ define i8* @ldrbu16_m127(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbu16_m128(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -976,18 +880,16 @@ define i8* @ldrbu16_m128(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbs16_4(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -996,18 +898,16 @@ define i8* @ldrbs16_4(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbs16_3(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1016,18 +916,16 @@ define i8* @ldrbs16_3(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbs16_2(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1036,18 +934,16 @@ define i8* @ldrbs16_2(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbs16_127(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1056,18 +952,16 @@ define i8* @ldrbs16_127(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbs16_128(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1077,18 +971,16 @@ define i8* @ldrbs16_128(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbs16_m127(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1097,18 +989,16 @@ define i8* @ldrbs16_m127(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbs16_m128(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1118,18 +1008,16 @@ define i8* @ldrbs16_m128(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %x
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrbu8_4(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1138,17 +1026,15 @@ define i8* @ldrbu8_4(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %x
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %x
 }
 
-define i8* @ldrbu8_3(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1157,17 +1043,15 @@ define i8* @ldrbu8_3(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %x
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %x
 }
 
-define i8* @ldrbu8_2(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1176,17 +1060,15 @@ define i8* @ldrbu8_2(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %x
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %x
 }
 
-define i8* @ldrbu8_127(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1195,17 +1077,15 @@ define i8* @ldrbu8_127(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %x
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %x
 }
 
-define i8* @ldrbu8_128(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1215,17 +1095,15 @@ define i8* @ldrbu8_128(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %x
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %x
 }
 
-define i8* @ldrbu8_m127(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1234,17 +1112,15 @@ define i8* @ldrbu8_m127(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %x
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %x
 }
 
-define i8* @ldrbu8_m128(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1254,17 +1130,15 @@ define i8* @ldrbu8_m128(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %x
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %x
 }
 
-define i8* @ldrwf32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1273,17 +1147,15 @@ define i8* @ldrwf32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwf32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1293,17 +1165,15 @@ define i8* @ldrwf32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwf32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1313,17 +1183,15 @@ define i8* @ldrwf32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwf32_508(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_508(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1332,17 +1200,15 @@ define i8* @ldrwf32_508(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 508
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwf32_512(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_512(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1352,17 +1218,15 @@ define i8* @ldrwf32_512(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 512
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwf32_m508(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_m508(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1371,17 +1235,15 @@ define i8* @ldrwf32_m508(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -508
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrwf32_m512(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_m512(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1391,17 +1253,15 @@ define i8* @ldrwf32_m512(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -512
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %x
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %x
 }
 
-define i8* @ldrhf16_4(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1410,17 +1270,15 @@ define i8* @ldrhf16_4(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhf16_3(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1430,17 +1288,15 @@ define i8* @ldrhf16_3(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhf16_2(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1449,17 +1305,15 @@ define i8* @ldrhf16_2(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhf16_254(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1468,17 +1322,15 @@ define i8* @ldrhf16_254(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhf16_256(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1488,17 +1340,15 @@ define i8* @ldrhf16_256(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhf16_m254(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_m254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1507,17 +1357,15 @@ define i8* @ldrhf16_m254(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %x
 }
 
-define i8* @ldrhf16_m256(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_m256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1527,20 +1375,18 @@ define i8* @ldrhf16_m256(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %x
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %x
 }
 
 
 
 
-define i8* @strw32_4(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1549,17 +1395,15 @@ define i8* @strw32_4(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strw32_3(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1569,17 +1413,15 @@ define i8* @strw32_3(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strw32_2(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1589,17 +1431,15 @@ define i8* @strw32_2(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strw32_508(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_508(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1608,17 +1448,15 @@ define i8* @strw32_508(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0, #508]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strw32_512(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_512(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1628,17 +1466,15 @@ define i8* @strw32_512(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strw32_m508(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_m508(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1647,17 +1483,15 @@ define i8* @strw32_m508(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0, #-508]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strw32_m512(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_m512(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1667,17 +1501,15 @@ define i8* @strw32_m512(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh32_4(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1686,17 +1518,15 @@ define i8* @strh32_4(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh32_3(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1706,17 +1536,15 @@ define i8* @strh32_3(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh32_2(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1725,17 +1553,15 @@ define i8* @strh32_2(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0, #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh32_254(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1744,17 +1570,15 @@ define i8* @strh32_254(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0, #254]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh32_256(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1764,17 +1588,15 @@ define i8* @strh32_256(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh32_m254(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_m254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1783,17 +1605,15 @@ define i8* @strh32_m254(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0, #-254]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh32_m256(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_m256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1803,17 +1623,15 @@ define i8* @strh32_m256(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh16_4(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1822,17 +1640,15 @@ define i8* @strh16_4(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh16_3(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1842,17 +1658,15 @@ define i8* @strh16_3(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh16_2(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1861,17 +1675,15 @@ define i8* @strh16_2(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh16_254(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1880,17 +1692,15 @@ define i8* @strh16_254(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #254]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh16_256(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1900,17 +1710,15 @@ define i8* @strh16_256(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh16_m254(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_m254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1919,17 +1727,15 @@ define i8* @strh16_m254(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #-254]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strh16_m256(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_m256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1939,17 +1745,15 @@ define i8* @strh16_m256(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb32_4(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1958,17 +1762,15 @@ define i8* @strb32_4(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb32_3(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1977,17 +1779,15 @@ define i8* @strb32_3(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0, #3]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb32_2(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1996,17 +1796,15 @@ define i8* @strb32_2(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0, #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb32_127(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2015,17 +1813,15 @@ define i8* @strb32_127(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0, #127]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb32_128(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2035,17 +1831,15 @@ define i8* @strb32_128(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb32_m127(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_m127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2054,17 +1848,15 @@ define i8* @strb32_m127(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0, #-127]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb32_m128(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_m128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2074,17 +1866,15 @@ define i8* @strb32_m128(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb16_4(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2093,17 +1883,15 @@ define i8* @strb16_4(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb16_3(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2112,17 +1900,15 @@ define i8* @strb16_3(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0, #3]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb16_2(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2131,17 +1917,15 @@ define i8* @strb16_2(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0, #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb16_127(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2150,17 +1934,15 @@ define i8* @strb16_127(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0, #127]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb16_128(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2170,17 +1952,15 @@ define i8* @strb16_128(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb16_m127(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_m127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2189,17 +1969,15 @@ define i8* @strb16_m127(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0, #-127]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb16_m128(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_m128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2209,17 +1987,15 @@ define i8* @strb16_m128(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb8_4(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2228,17 +2004,15 @@ define i8* @strb8_4(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %y
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb8_3(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2247,17 +2021,15 @@ define i8* @strb8_3(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0, #3]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %y
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb8_2(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2266,17 +2038,15 @@ define i8* @strb8_2(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0, #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %y
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb8_127(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2285,17 +2055,15 @@ define i8* @strb8_127(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0, #127]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %y
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb8_128(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2305,17 +2073,15 @@ define i8* @strb8_128(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %y
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb8_m127(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_m127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2324,17 +2090,15 @@ define i8* @strb8_m127(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0, #-127]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %y
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strb8_m128(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_m128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2344,17 +2108,15 @@ define i8* @strb8_m128(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %y
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strwf32_4(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2363,17 +2125,15 @@ define i8* @strwf32_4(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strwf32_3(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2383,17 +2143,15 @@ define i8* @strwf32_3(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strwf32_2(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2403,17 +2161,15 @@ define i8* @strwf32_2(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strwf32_508(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_508(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2422,17 +2178,15 @@ define i8* @strwf32_508(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0, #508]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 508
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strwf32_512(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_512(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2442,17 +2196,15 @@ define i8* @strwf32_512(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 512
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strwf32_m508(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_m508(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2461,17 +2213,15 @@ define i8* @strwf32_m508(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0, #-508]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -508
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strwf32_m512(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_m512(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2481,17 +2231,15 @@ define i8* @strwf32_m512(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -512
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %y
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strhf16_4(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2500,17 +2248,15 @@ define i8* @strhf16_4(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #4]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strhf16_3(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2520,17 +2266,15 @@ define i8* @strhf16_3(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strhf16_2(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2539,17 +2283,15 @@ define i8* @strhf16_2(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strhf16_254(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2558,17 +2300,15 @@ define i8* @strhf16_254(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #254]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strhf16_256(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2578,17 +2318,15 @@ define i8* @strhf16_256(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strhf16_m254(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_m254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2597,17 +2335,15 @@ define i8* @strhf16_m254(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #-254]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
 }
 
-define i8* @strhf16_m256(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_m256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2617,30 +2353,28 @@ define i8* @strhf16_m256(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %y
-}
-
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
-declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32, <4 x i1>, <4 x i16>)
-declare <8 x i16> @llvm.masked.load.v8i16.p0v4i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
-declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32, <4 x i1>, <4 x i8>)
-declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)
-declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
-declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>)
-
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v4i16.p0v4i16(<4 x i16>, <4 x i16>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
-declare void @llvm.masked.store.v8i8.p0v8i8(<8 x i8>, <8 x i8>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v4i8.p0v4i8(<4 x i8>, <4 x i8>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32, <8 x i1>)
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %y
+}
+
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32, <4 x i1>, <4 x i32>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32, <4 x i1>, <4 x i16>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32, <8 x i1>, <8 x i16>)
+declare <4 x i8> @llvm.masked.load.v4i8.p0(ptr, i32, <4 x i1>, <4 x i8>)
+declare <8 x i8> @llvm.masked.load.v8i8.p0(ptr, i32, <8 x i1>, <8 x i8>)
+declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32, <16 x i1>, <16 x i8>)
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32, <4 x i1>, <4 x float>)
+declare <8 x half> @llvm.masked.load.v8f16.p0(ptr, i32, <8 x i1>, <8 x half>)
+
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i16.p0(<4 x i16>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32, <16 x i1>)
+declare void @llvm.masked.store.v8i8.p0(<8 x i8>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i8.p0(<4 x i8>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8f16.p0(<8 x half>, ptr, i32, <8 x i1>)

diff  --git a/llvm/test/CodeGen/Thumb2/mve-masked-ldst-postinc.ll b/llvm/test/CodeGen/Thumb2/mve-masked-ldst-postinc.ll
index e505930117ec4..7b4cef17569bf 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-ldst-postinc.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-ldst-postinc.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
 
-define i8* @ldrwu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -11,17 +11,15 @@ define i8* @ldrwu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -31,17 +29,15 @@ define i8* @ldrwu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -51,17 +47,15 @@ define i8* @ldrwu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_508(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_508(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -70,17 +64,15 @@ define i8* @ldrwu32_508(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_512(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_512(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -90,17 +82,15 @@ define i8* @ldrwu32_512(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_m508(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_m508(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -109,17 +99,15 @@ define i8* @ldrwu32_m508(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_m512(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_m512(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -129,17 +117,15 @@ define i8* @ldrwu32_m512(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -148,18 +134,16 @@ define i8* @ldrhu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -169,18 +153,16 @@ define i8* @ldrhu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -189,18 +171,16 @@ define i8* @ldrhu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_254(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -209,18 +189,16 @@ define i8* @ldrhu32_254(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_256(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -230,18 +208,16 @@ define i8* @ldrhu32_256(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_m254(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_m254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -250,18 +226,16 @@ define i8* @ldrhu32_m254(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_m256(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_m256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -271,18 +245,16 @@ define i8* @ldrhu32_m256(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -291,18 +263,16 @@ define i8* @ldrhs32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -312,18 +282,16 @@ define i8* @ldrhs32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -332,18 +300,16 @@ define i8* @ldrhs32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_254(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -352,18 +318,16 @@ define i8* @ldrhs32_254(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_256(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -373,18 +337,16 @@ define i8* @ldrhs32_256(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_m254(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_m254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -393,18 +355,16 @@ define i8* @ldrhs32_m254(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_m256(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_m256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -414,18 +374,16 @@ define i8* @ldrhs32_m256(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %x, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu16_4(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -434,17 +392,15 @@ define i8* @ldrhu16_4(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_3(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -454,17 +410,15 @@ define i8* @ldrhu16_3(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_2(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -473,17 +427,15 @@ define i8* @ldrhu16_2(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_254(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -492,17 +444,15 @@ define i8* @ldrhu16_254(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_256(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -512,17 +462,15 @@ define i8* @ldrhu16_256(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_m254(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_m254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -531,17 +479,15 @@ define i8* @ldrhu16_m254(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_m256(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_m256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -551,17 +497,15 @@ define i8* @ldrhu16_m256(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -570,18 +514,16 @@ define i8* @ldrbu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -590,18 +532,16 @@ define i8* @ldrbu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -610,18 +550,16 @@ define i8* @ldrbu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_127(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -630,18 +568,16 @@ define i8* @ldrbu32_127(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_128(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -651,18 +587,16 @@ define i8* @ldrbu32_128(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_m127(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -671,18 +605,16 @@ define i8* @ldrbu32_m127(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_m128(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -692,18 +624,16 @@ define i8* @ldrbu32_m128(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -712,18 +642,16 @@ define i8* @ldrbs32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -732,18 +660,16 @@ define i8* @ldrbs32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -752,18 +678,16 @@ define i8* @ldrbs32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_127(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -772,18 +696,16 @@ define i8* @ldrbs32_127(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_128(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -793,18 +715,16 @@ define i8* @ldrbs32_128(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_m127(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -813,18 +733,16 @@ define i8* @ldrbs32_m127(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_m128(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -834,18 +752,16 @@ define i8* @ldrbs32_m128(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %x, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu16_4(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -854,18 +770,16 @@ define i8* @ldrbu16_4(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_3(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -874,18 +788,16 @@ define i8* @ldrbu16_3(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_2(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -894,18 +806,16 @@ define i8* @ldrbu16_2(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_127(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -914,18 +824,16 @@ define i8* @ldrbu16_127(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_128(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -935,18 +843,16 @@ define i8* @ldrbu16_128(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_m127(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -955,18 +861,16 @@ define i8* @ldrbu16_m127(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_m128(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -976,18 +880,16 @@ define i8* @ldrbu16_m128(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_4(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -996,18 +898,16 @@ define i8* @ldrbs16_4(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_3(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1016,18 +916,16 @@ define i8* @ldrbs16_3(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_2(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1036,18 +934,16 @@ define i8* @ldrbs16_2(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_127(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1056,18 +952,16 @@ define i8* @ldrbs16_127(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_128(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1077,18 +971,16 @@ define i8* @ldrbs16_128(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_m127(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1097,18 +989,16 @@ define i8* @ldrbs16_m127(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_m128(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1118,18 +1008,16 @@ define i8* @ldrbs16_m128(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %x, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu8_4(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1138,17 +1026,15 @@ define i8* @ldrbu8_4(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %x, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_3(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1157,17 +1043,15 @@ define i8* @ldrbu8_3(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %x, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_2(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1176,17 +1060,15 @@ define i8* @ldrbu8_2(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %x, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_127(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1195,17 +1077,15 @@ define i8* @ldrbu8_127(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %x, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_128(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1215,17 +1095,15 @@ define i8* @ldrbu8_128(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %x, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_m127(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1234,17 +1112,15 @@ define i8* @ldrbu8_m127(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %x, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_m128(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1254,17 +1130,15 @@ define i8* @ldrbu8_m128(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %x, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrwf32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1273,17 +1147,15 @@ define i8* @ldrwf32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1293,17 +1165,15 @@ define i8* @ldrwf32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1313,17 +1183,15 @@ define i8* @ldrwf32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf32_508(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_508(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1332,17 +1200,15 @@ define i8* @ldrwf32_508(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 508
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf32_512(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_512(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1352,17 +1218,15 @@ define i8* @ldrwf32_512(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 512
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf32_m508(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_m508(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1371,17 +1235,15 @@ define i8* @ldrwf32_m508(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -508
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf32_m512(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_m512(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1391,17 +1253,15 @@ define i8* @ldrwf32_m512(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -512
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhf16_4(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1410,17 +1270,15 @@ define i8* @ldrhf16_4(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhf16_3(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1430,17 +1288,15 @@ define i8* @ldrhf16_3(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhf16_2(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1449,17 +1305,15 @@ define i8* @ldrhf16_2(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhf16_254(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1468,17 +1322,15 @@ define i8* @ldrhf16_254(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhf16_256(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1488,17 +1340,15 @@ define i8* @ldrhf16_256(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhf16_m254(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_m254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1507,17 +1357,15 @@ define i8* @ldrhf16_m254(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhf16_m256(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_m256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1527,20 +1375,18 @@ define i8* @ldrhf16_m256(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %x, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
 
 
 
-define i8* @strw32_4(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1549,17 +1395,15 @@ define i8* @strw32_4(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0], #4
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strw32_3(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1569,17 +1413,15 @@ define i8* @strw32_3(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    adds r0, #3
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strw32_2(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1589,17 +1431,15 @@ define i8* @strw32_2(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    adds r0, #2
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strw32_508(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_508(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1608,17 +1448,15 @@ define i8* @strw32_508(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0], #508
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strw32_512(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_512(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1628,17 +1466,15 @@ define i8* @strw32_512(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    add.w r0, r0, #512
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strw32_m508(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_m508(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1647,17 +1483,15 @@ define i8* @strw32_m508(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0], #-508
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strw32_m512(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_m512(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1667,17 +1501,15 @@ define i8* @strw32_m512(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    sub.w r0, r0, #512
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_4(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1686,17 +1518,15 @@ define i8* @strh32_4(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0], #4
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %y, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_3(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1706,17 +1536,15 @@ define i8* @strh32_3(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    adds r0, #3
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %y, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_2(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1725,17 +1553,15 @@ define i8* @strh32_2(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0], #2
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %y, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_254(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1744,17 +1570,15 @@ define i8* @strh32_254(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0], #254
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %y, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_256(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1764,17 +1588,15 @@ define i8* @strh32_256(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    add.w r0, r0, #256
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %y, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_m254(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_m254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1783,17 +1605,15 @@ define i8* @strh32_m254(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0], #-254
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %y, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_m256(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_m256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1803,17 +1623,15 @@ define i8* @strh32_m256(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    sub.w r0, r0, #256
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %y, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_4(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1822,17 +1640,15 @@ define i8* @strh16_4(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0], #4
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_3(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1842,17 +1658,15 @@ define i8* @strh16_3(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    adds r0, #3
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_2(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1861,17 +1675,15 @@ define i8* @strh16_2(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0], #2
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_254(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1880,17 +1692,15 @@ define i8* @strh16_254(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0], #254
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_256(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1900,17 +1710,15 @@ define i8* @strh16_256(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    add.w r0, r0, #256
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_m254(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_m254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1919,17 +1727,15 @@ define i8* @strh16_m254(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0], #-254
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_m256(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_m256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1939,17 +1745,15 @@ define i8* @strh16_m256(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    sub.w r0, r0, #256
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %y to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_4(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1958,17 +1762,15 @@ define i8* @strb32_4(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0], #4
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %y, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_3(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1977,17 +1779,15 @@ define i8* @strb32_3(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0], #3
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %y, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_2(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1996,17 +1796,15 @@ define i8* @strb32_2(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0], #2
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %y, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_127(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2015,17 +1813,15 @@ define i8* @strb32_127(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0], #127
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %y, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_128(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2035,17 +1831,15 @@ define i8* @strb32_128(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    adds r0, #128
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %y, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_m127(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_m127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2054,17 +1848,15 @@ define i8* @strb32_m127(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0], #-127
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %y, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_m128(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_m128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2074,17 +1866,15 @@ define i8* @strb32_m128(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    subs r0, #128
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %y, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_4(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2093,17 +1883,15 @@ define i8* @strb16_4(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0], #4
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %y, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_3(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2112,17 +1900,15 @@ define i8* @strb16_3(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0], #3
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %y, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_2(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2131,17 +1917,15 @@ define i8* @strb16_2(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0], #2
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %y, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_127(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2150,17 +1934,15 @@ define i8* @strb16_127(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0], #127
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %y, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_128(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2170,17 +1952,15 @@ define i8* @strb16_128(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    adds r0, #128
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %y, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_m127(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_m127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2189,17 +1969,15 @@ define i8* @strb16_m127(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0], #-127
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %y, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_m128(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_m128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2209,17 +1987,15 @@ define i8* @strb16_m128(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    subs r0, #128
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %y, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_4(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2228,17 +2004,15 @@ define i8* @strb8_4(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0], #4
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %y, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_3(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2247,17 +2021,15 @@ define i8* @strb8_3(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0], #3
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %y, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_2(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2266,17 +2038,15 @@ define i8* @strb8_2(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0], #2
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %y, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_127(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2285,17 +2055,15 @@ define i8* @strb8_127(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0], #127
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %y, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_128(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2305,17 +2073,15 @@ define i8* @strb8_128(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    adds r0, #128
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %y, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_m127(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_m127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2324,17 +2090,15 @@ define i8* @strb8_m127(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0], #-127
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %y, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_m128(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_m128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2344,17 +2108,15 @@ define i8* @strb8_m128(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    subs r0, #128
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %y, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_4(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2363,17 +2125,15 @@ define i8* @strwf32_4(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0], #4
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %y to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_3(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2383,17 +2143,15 @@ define i8* @strwf32_3(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    adds r0, #3
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %y to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_2(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2403,17 +2161,15 @@ define i8* @strwf32_2(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    adds r0, #2
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %y to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_508(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_508(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2422,17 +2178,15 @@ define i8* @strwf32_508(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0], #508
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 508
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %y to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_512(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_512(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2442,17 +2196,15 @@ define i8* @strwf32_512(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    add.w r0, r0, #512
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 512
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %y to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_m508(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_m508(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2461,17 +2213,15 @@ define i8* @strwf32_m508(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0], #-508
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -508
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %y to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_m512(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_m512(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2481,17 +2231,15 @@ define i8* @strwf32_m512(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    sub.w r0, r0, #512
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -512
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %y to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_4(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2500,17 +2248,15 @@ define i8* @strhf16_4(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0], #4
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %y to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_3(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2520,17 +2266,15 @@ define i8* @strhf16_3(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    adds r0, #3
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %y to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_2(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2539,17 +2283,15 @@ define i8* @strhf16_2(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0], #2
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %y to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_254(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2558,17 +2300,15 @@ define i8* @strhf16_254(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0], #254
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %y to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_256(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2578,17 +2318,15 @@ define i8* @strhf16_256(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    add.w r0, r0, #256
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %y to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_m254(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_m254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2597,17 +2335,15 @@ define i8* @strhf16_m254(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0], #-254
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %y to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_m256(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_m256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2617,30 +2353,28 @@ define i8* @strhf16_m256(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    sub.w r0, r0, #256
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %y to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
-}
-
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
-declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32, <4 x i1>, <4 x i16>)
-declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
-declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32, <4 x i1>, <4 x i8>)
-declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)
-declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
-declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>)
-
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v4i16.p0v4i16(<4 x i16>, <4 x i16>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
-declare void @llvm.masked.store.v8i8.p0v8i8(<8 x i8>, <8 x i8>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v4i8.p0v4i8(<4 x i8>, <4 x i8>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32, <8 x i1>)
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
+}
+
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32, <4 x i1>, <4 x i32>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32, <4 x i1>, <4 x i16>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32, <8 x i1>, <8 x i16>)
+declare <4 x i8> @llvm.masked.load.v4i8.p0(ptr, i32, <4 x i1>, <4 x i8>)
+declare <8 x i8> @llvm.masked.load.v8i8.p0(ptr, i32, <8 x i1>, <8 x i8>)
+declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32, <16 x i1>, <16 x i8>)
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32, <4 x i1>, <4 x float>)
+declare <8 x half> @llvm.masked.load.v8f16.p0(ptr, i32, <8 x i1>, <8 x half>)
+
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i16.p0(<4 x i16>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32, <16 x i1>)
+declare void @llvm.masked.store.v8i8.p0(<8 x i8>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i8.p0(<4 x i8>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8f16.p0(<8 x half>, ptr, i32, <8 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-ldst-preinc.ll b/llvm/test/CodeGen/Thumb2/mve-masked-ldst-preinc.ll
index 6e74b7a78c0e1..a0edee59b0da2 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-ldst-preinc.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-ldst-preinc.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
 
-define i8* @ldrwu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -11,17 +11,15 @@ define i8* @ldrwu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -31,17 +29,15 @@ define i8* @ldrwu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -51,17 +47,15 @@ define i8* @ldrwu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_508(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_508(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -70,17 +64,15 @@ define i8* @ldrwu32_508(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 508
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_512(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_512(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -90,17 +82,15 @@ define i8* @ldrwu32_512(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 512
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_m508(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_m508(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -109,17 +99,15 @@ define i8* @ldrwu32_m508(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -508
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwu32_m512(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwu32_m512(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwu32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -129,17 +117,15 @@ define i8* @ldrwu32_m512(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -512
-  %0 = bitcast i8* %z to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -148,18 +134,16 @@ define i8* @ldrhu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -169,18 +153,16 @@ define i8* @ldrhu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -189,18 +171,16 @@ define i8* @ldrhu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_254(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -209,18 +189,16 @@ define i8* @ldrhu32_254(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_256(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -230,18 +208,16 @@ define i8* @ldrhu32_256(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_m254(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_m254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -250,18 +226,16 @@ define i8* @ldrhu32_m254(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu32_m256(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhu32_m256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -271,18 +245,16 @@ define i8* @ldrhu32_m256(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = zext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = zext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -291,18 +263,16 @@ define i8* @ldrhs32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -312,18 +282,16 @@ define i8* @ldrhs32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -332,18 +300,16 @@ define i8* @ldrhs32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_254(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -352,18 +318,16 @@ define i8* @ldrhs32_254(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_256(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -373,18 +337,16 @@ define i8* @ldrhs32_256(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_m254(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_m254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -393,18 +355,16 @@ define i8* @ldrhs32_m254(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhs32_m256(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrhs32_m256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhs32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -414,18 +374,16 @@ define i8* @ldrhs32_m256(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %0, i32 2, <4 x i1> %c, <4 x i16> undef)
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %z, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhu16_4(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -434,17 +392,15 @@ define i8* @ldrhu16_4(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_3(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -454,17 +410,15 @@ define i8* @ldrhu16_3(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_2(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -473,17 +427,15 @@ define i8* @ldrhu16_2(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_254(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -492,17 +444,15 @@ define i8* @ldrhu16_254(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_256(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -512,17 +462,15 @@ define i8* @ldrhu16_256(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_m254(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_m254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -531,17 +479,15 @@ define i8* @ldrhu16_m254(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhu16_m256(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhu16_m256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhu16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -551,17 +497,15 @@ define i8* @ldrhu16_m256(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 2, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -570,18 +514,16 @@ define i8* @ldrbu32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -590,18 +532,16 @@ define i8* @ldrbu32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -610,18 +550,16 @@ define i8* @ldrbu32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_127(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -630,18 +568,16 @@ define i8* @ldrbu32_127(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_128(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -651,18 +587,16 @@ define i8* @ldrbu32_128(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_m127(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -671,18 +605,16 @@ define i8* @ldrbu32_m127(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu32_m128(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbu32_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -692,18 +624,16 @@ define i8* @ldrbu32_m128(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -712,18 +642,16 @@ define i8* @ldrbs32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -732,18 +660,16 @@ define i8* @ldrbs32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -752,18 +678,16 @@ define i8* @ldrbs32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_127(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -772,18 +696,16 @@ define i8* @ldrbs32_127(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_128(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -793,18 +715,16 @@ define i8* @ldrbs32_128(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_m127(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -813,18 +733,16 @@ define i8* @ldrbs32_m127(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbs32_m128(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrbs32_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -834,18 +752,16 @@ define i8* @ldrbs32_m128(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %0, i32 1, <4 x i1> %c, <4 x i8> undef)
-  %2 = sext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %2, <4 x i32>* %3, align 4
-  ret i8* %z
+  %0 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %z, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %1 = sext <4 x i8> %0 to <4 x i32>
+  store <4 x i32> %1, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrbu16_4(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -854,18 +770,16 @@ define i8* @ldrbu16_4(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_3(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -874,18 +788,16 @@ define i8* @ldrbu16_3(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_2(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -894,18 +806,16 @@ define i8* @ldrbu16_2(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_127(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -914,18 +824,16 @@ define i8* @ldrbu16_127(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_128(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -935,18 +843,16 @@ define i8* @ldrbu16_128(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_m127(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -955,18 +861,16 @@ define i8* @ldrbu16_m127(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu16_m128(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbu16_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -976,18 +880,16 @@ define i8* @ldrbu16_m128(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = zext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = zext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_4(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -996,18 +898,16 @@ define i8* @ldrbs16_4(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_3(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1016,18 +916,16 @@ define i8* @ldrbs16_3(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_2(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1036,18 +934,16 @@ define i8* @ldrbs16_2(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_127(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1056,18 +952,16 @@ define i8* @ldrbs16_127(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_128(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1077,18 +971,16 @@ define i8* @ldrbs16_128(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_m127(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1097,18 +989,16 @@ define i8* @ldrbs16_m127(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbs16_m128(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrbs16_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbs16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1118,18 +1008,16 @@ define i8* @ldrbs16_m128(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %0, i32 1, <8 x i1> %c, <8 x i8> undef)
-  %2 = sext <8 x i8> %1 to <8 x i16>
-  %3 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %2, <8 x i16>* %3, align 2
-  ret i8* %z
+  %0 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %z, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %1 = sext <8 x i8> %0 to <8 x i16>
+  store <8 x i16> %1, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrbu8_4(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1138,17 +1026,15 @@ define i8* @ldrbu8_4(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_3(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1157,17 +1043,15 @@ define i8* @ldrbu8_3(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_2(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1176,17 +1060,15 @@ define i8* @ldrbu8_2(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_127(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1195,17 +1077,15 @@ define i8* @ldrbu8_127(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 127
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 127
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_128(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1215,17 +1095,15 @@ define i8* @ldrbu8_128(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 128
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 128
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_m127(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_m127(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1234,17 +1112,15 @@ define i8* @ldrbu8_m127(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -127
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 -127
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrbu8_m128(i8* %x, i8* %y, <16 x i8> *%m) {
+define ptr @ldrbu8_m128(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrbu8_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r2]
@@ -1254,17 +1130,15 @@ define i8* @ldrbu8_m128(i8* %x, i8* %y, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -128
-  %0 = bitcast i8* %z to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %x, i32 -128
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 1, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 1
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 1, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 1
+  ret ptr %z
 }
 
-define i8* @ldrwf32_4(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1273,17 +1147,15 @@ define i8* @ldrwf32_4(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf32_3(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1293,17 +1165,15 @@ define i8* @ldrwf32_3(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf32_2(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1313,17 +1183,15 @@ define i8* @ldrwf32_2(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf32_508(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_508(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1332,17 +1200,15 @@ define i8* @ldrwf32_508(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 508
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf32_512(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_512(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1352,17 +1218,15 @@ define i8* @ldrwf32_512(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 512
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf32_m508(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_m508(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1371,17 +1235,15 @@ define i8* @ldrwf32_m508(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -508
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrwf32_m512(i8* %x, i8* %y, <4 x i32> *%m) {
+define ptr @ldrwf32_m512(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrwf32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
@@ -1391,17 +1253,15 @@ define i8* @ldrwf32_m512(i8* %x, i8* %y, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -512
-  %0 = bitcast i8* %z to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %x, i32 -512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define i8* @ldrhf16_4(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_4(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1410,17 +1270,15 @@ define i8* @ldrhf16_4(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhf16_3(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_3(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1430,17 +1288,15 @@ define i8* @ldrhf16_3(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 3
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhf16_2(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_2(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1449,17 +1305,15 @@ define i8* @ldrhf16_2(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 2
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhf16_254(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1468,17 +1322,15 @@ define i8* @ldrhf16_254(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 254
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhf16_256(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1488,17 +1340,15 @@ define i8* @ldrhf16_256(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 256
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhf16_m254(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_m254(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1507,17 +1357,15 @@ define i8* @ldrhf16_m254(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -254
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
-define i8* @ldrhf16_m256(i8* %x, i8* %y, <8 x i16> *%m) {
+define ptr @ldrhf16_m256(ptr %x, ptr %y, ptr %m) {
 ; CHECK-LABEL: ldrhf16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r2]
@@ -1527,20 +1375,18 @@ define i8* @ldrhf16_m256(i8* %x, i8* %y, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 -256
-  %0 = bitcast i8* %z to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %x, i32 -256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 2
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 2, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 2
+  ret ptr %z
 }
 
 
 
 
-define i8* @strw32_4(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1549,17 +1395,15 @@ define i8* @strw32_4(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0, #4]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strw32_3(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1569,17 +1413,15 @@ define i8* @strw32_3(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strw32_2(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1589,17 +1431,15 @@ define i8* @strw32_2(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strw32_508(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_508(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1608,17 +1448,15 @@ define i8* @strw32_508(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0, #508]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strw32_512(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_512(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1628,17 +1466,15 @@ define i8* @strw32_512(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strw32_m508(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_m508(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1647,17 +1483,15 @@ define i8* @strw32_m508(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0, #-508]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -508
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strw32_m512(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strw32_m512(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strw32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1667,17 +1501,15 @@ define i8* @strw32_m512(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -512
-  %0 = bitcast i8* %x to <4 x i32>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i32>, ptr %x, align 4
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_4(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1686,17 +1518,15 @@ define i8* @strh32_4(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0, #4]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_3(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1706,17 +1536,15 @@ define i8* @strh32_3(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_2(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1725,17 +1553,15 @@ define i8* @strh32_2(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0, #2]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_254(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1744,17 +1570,15 @@ define i8* @strh32_254(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0, #254]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_256(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1764,17 +1588,15 @@ define i8* @strh32_256(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_m254(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_m254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1783,17 +1605,15 @@ define i8* @strh32_m254(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0, #-254]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh32_m256(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strh32_m256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh32_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1803,17 +1623,15 @@ define i8* @strh32_m256(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrht.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <4 x i16>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %1, <4 x i16>* %2, i32 2, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %0, ptr %z, i32 2, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_4(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1822,17 +1640,15 @@ define i8* @strh16_4(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #4]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_3(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1842,17 +1658,15 @@ define i8* @strh16_3(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_2(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1861,17 +1675,15 @@ define i8* @strh16_2(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #2]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_254(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1880,17 +1692,15 @@ define i8* @strh16_254(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #254]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_256(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1900,17 +1710,15 @@ define i8* @strh16_256(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_m254(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_m254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1919,17 +1727,15 @@ define i8* @strh16_m254(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #-254]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strh16_m256(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strh16_m256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strh16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -1939,17 +1745,15 @@ define i8* @strh16_m256(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <8 x i16>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i8* %z to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i16>, ptr %x, align 2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_4(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1958,17 +1762,15 @@ define i8* @strb32_4(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0, #4]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_3(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1977,17 +1779,15 @@ define i8* @strb32_3(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0, #3]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_2(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -1996,17 +1796,15 @@ define i8* @strb32_2(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0, #2]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_127(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2015,17 +1813,15 @@ define i8* @strb32_127(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0, #127]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_128(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2035,17 +1831,15 @@ define i8* @strb32_128(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_m127(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_m127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2054,17 +1848,15 @@ define i8* @strb32_m127(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0, #-127]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb32_m128(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strb32_m128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb32_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2074,17 +1866,15 @@ define i8* @strb32_m128(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrbt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <4 x i8>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <4 x i8>*
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %1, <4 x i8>* %2, i32 1, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %0, ptr %z, i32 1, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_4(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2093,17 +1883,15 @@ define i8* @strb16_4(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0, #4]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_3(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2112,17 +1900,15 @@ define i8* @strb16_3(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0, #3]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_2(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2131,17 +1917,15 @@ define i8* @strb16_2(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0, #2]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_127(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2150,17 +1934,15 @@ define i8* @strb16_127(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0, #127]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_128(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2170,17 +1952,15 @@ define i8* @strb16_128(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_m127(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_m127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2189,17 +1969,15 @@ define i8* @strb16_m127(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0, #-127]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb16_m128(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strb16_m128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb16_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2209,17 +1987,15 @@ define i8* @strb16_m128(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrbt.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <8 x i8>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %1, <8 x i8>* %2, i32 1, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %0, ptr %z, i32 1, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_4(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2228,17 +2004,15 @@ define i8* @strb8_4(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0, #4]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_3(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2247,17 +2021,15 @@ define i8* @strb8_3(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0, #3]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_2(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2266,17 +2038,15 @@ define i8* @strb8_2(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0, #2]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_127(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2285,17 +2055,15 @@ define i8* @strb8_127(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0, #127]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 127
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_128(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2305,17 +2073,15 @@ define i8* @strb8_128(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 128
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_m127(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_m127(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_m127:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2324,17 +2090,15 @@ define i8* @strb8_m127(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0, #-127]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -127
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 -127
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strb8_m128(i8* %y, i8* %x, <16 x i8> *%m) {
+define ptr @strb8_m128(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strb8_m128:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r2]
@@ -2344,17 +2108,15 @@ define i8* @strb8_m128(i8* %y, i8* %x, <16 x i8> *%m) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -128
-  %0 = bitcast i8* %x to <16 x i8>*
-  %mask = load <16 x i8>, <16 x i8>* %m, align 1
+  %z = getelementptr inbounds i8, ptr %y, i32 -128
+  %mask = load <16 x i8>, ptr %m, align 1
   %c = icmp ne <16 x i8> %mask, zeroinitializer
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %z to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  %0 = load <16 x i8>, ptr %x, align 1
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_4(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2363,17 +2125,15 @@ define i8* @strwf32_4(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0, #4]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_3(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2383,17 +2143,15 @@ define i8* @strwf32_3(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_2(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2403,17 +2161,15 @@ define i8* @strwf32_2(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_508(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_508(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2422,17 +2178,15 @@ define i8* @strwf32_508(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0, #508]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 508
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_512(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_512(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2442,17 +2196,15 @@ define i8* @strwf32_512(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 512
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_m508(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_m508(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_m508:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2461,17 +2213,15 @@ define i8* @strwf32_m508(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0, #-508]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -508
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -508
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strwf32_m512(i8* %y, i8* %x, <4 x i32> *%m) {
+define ptr @strwf32_m512(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strwf32_m512:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r2]
@@ -2481,17 +2231,15 @@ define i8* @strwf32_m512(i8* %y, i8* %x, <4 x i32> *%m) {
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -512
-  %0 = bitcast i8* %x to <4 x float>*
-  %mask = load <4 x i32>, <4 x i32>* %m, align 4
+  %z = getelementptr inbounds i8, ptr %y, i32 -512
+  %mask = load <4 x i32>, ptr %m, align 4
   %c = icmp ne <4 x i32> %mask, zeroinitializer
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  %0 = load <4 x float>, ptr %x, align 4
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_4(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_4(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2500,17 +2248,15 @@ define i8* @strhf16_4(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #4]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_3(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_3(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2520,17 +2266,15 @@ define i8* @strhf16_3(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 3
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 3
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_2(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_2(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2539,17 +2283,15 @@ define i8* @strhf16_2(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #2]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 2
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 2
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_254(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2558,17 +2300,15 @@ define i8* @strhf16_254(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #254]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 254
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_256(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2578,17 +2318,15 @@ define i8* @strhf16_256(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 256
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_m254(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_m254(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_m254:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2597,17 +2335,15 @@ define i8* @strhf16_m254(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0, #-254]!
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -254
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -254
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @strhf16_m256(i8* %y, i8* %x, <8 x i16> *%m) {
+define ptr @strhf16_m256(ptr %y, ptr %x, ptr %m) {
 ; CHECK-LABEL: strhf16_m256:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r2]
@@ -2617,30 +2353,28 @@ define i8* @strhf16_m256(i8* %y, i8* %x, <8 x i16> *%m) {
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 -256
-  %0 = bitcast i8* %x to <8 x half>*
-  %mask = load <8 x i16>, <8 x i16>* %m, align 2
+  %z = getelementptr inbounds i8, ptr %y, i32 -256
+  %mask = load <8 x i16>, ptr %m, align 2
   %c = icmp ne <8 x i16> %mask, zeroinitializer
-  %1 = load <8 x half>, <8 x half>* %0, align 2
-  %2 = bitcast i8* %z to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
-}
-
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
-declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32, <4 x i1>, <4 x i16>)
-declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
-declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32, <4 x i1>, <4 x i8>)
-declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)
-declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
-declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>)
-
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v4i16.p0v4i16(<4 x i16>, <4 x i16>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
-declare void @llvm.masked.store.v8i8.p0v8i8(<8 x i8>, <8 x i8>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v4i8.p0v4i8(<4 x i8>, <4 x i8>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32, <8 x i1>)
+  %0 = load <8 x half>, ptr %x, align 2
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
+}
+
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32, <4 x i1>, <4 x i32>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32, <4 x i1>, <4 x i16>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32, <8 x i1>, <8 x i16>)
+declare <4 x i8> @llvm.masked.load.v4i8.p0(ptr, i32, <4 x i1>, <4 x i8>)
+declare <8 x i8> @llvm.masked.load.v8i8.p0(ptr, i32, <8 x i1>, <8 x i8>)
+declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32, <16 x i1>, <16 x i8>)
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32, <4 x i1>, <4 x float>)
+declare <8 x half> @llvm.masked.load.v8f16.p0(ptr, i32, <8 x i1>, <8 x half>)
+
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i16.p0(<4 x i16>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32, <16 x i1>)
+declare void @llvm.masked.store.v8i8.p0(<8 x i8>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i8.p0(<4 x i8>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8f16.p0(<8 x half>, ptr, i32, <8 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
index c3d4276c712c6..ab43ff1135802 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE
 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
 
-define void @foo_v4i32_v4i32(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i32> *%src) {
+define void @foo_v4i32_v4i32(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_v4i32_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -11,14 +11,14 @@ define void @foo_v4i32_v4i32(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i32> *%src
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %0 = load <4 x i32>, ptr %mask, align 4
   %1 = icmp sgt <4 x i32> %0, zeroinitializer
-  %2 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %src, i32 4, <4 x i1> %1, <4 x i32> undef)
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %dest, i32 4, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %src, i32 4, <4 x i1> %1, <4 x i32> undef)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %dest, i32 4, <4 x i1> %1)
   ret void
 }
 
-define void @foo_sext_v4i32_v4i8(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i8> *%src) {
+define void @foo_sext_v4i32_v4i8(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_sext_v4i32_v4i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -27,15 +27,15 @@ define void @foo_sext_v4i32_v4i8(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i8> *%
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %0 = load <4 x i32>, ptr %mask, align 4
   %1 = icmp sgt <4 x i32> %0, zeroinitializer
-  %2 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %src, i32 1, <4 x i1> %1, <4 x i8> undef)
+  %2 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %src, i32 1, <4 x i1> %1, <4 x i8> undef)
   %3 = sext <4 x i8> %2 to <4 x i32>
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %3, ptr %dest, i32 4, <4 x i1> %1)
   ret void
 }
 
-define void @foo_sext_v4i32_v4i16(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i16> *%src) {
+define void @foo_sext_v4i32_v4i16(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_sext_v4i32_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -44,15 +44,15 @@ define void @foo_sext_v4i32_v4i16(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i16>
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %0 = load <4 x i32>, ptr %mask, align 4
   %1 = icmp sgt <4 x i32> %0, zeroinitializer
-  %2 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %src, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %2 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %src, i32 2, <4 x i1> %1, <4 x i16> undef)
   %3 = sext <4 x i16> %2 to <4 x i32>
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %3, ptr %dest, i32 4, <4 x i1> %1)
   ret void
 }
 
-define void @foo_zext_v4i32_v4i8(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i8> *%src) {
+define void @foo_zext_v4i32_v4i8(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_zext_v4i32_v4i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -61,15 +61,15 @@ define void @foo_zext_v4i32_v4i8(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i8> *%
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %0 = load <4 x i32>, ptr %mask, align 4
   %1 = icmp sgt <4 x i32> %0, zeroinitializer
-  %2 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %src, i32 1, <4 x i1> %1, <4 x i8> undef)
+  %2 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %src, i32 1, <4 x i1> %1, <4 x i8> undef)
   %3 = zext <4 x i8> %2 to <4 x i32>
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %3, ptr %dest, i32 4, <4 x i1> %1)
   ret void
 }
 
-define void @foo_zext_v4i32_v4i16(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i16> *%src) {
+define void @foo_zext_v4i32_v4i16(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_zext_v4i32_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -78,15 +78,15 @@ define void @foo_zext_v4i32_v4i16(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i16>
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %0 = load <4 x i32>, ptr %mask, align 4
   %1 = icmp sgt <4 x i32> %0, zeroinitializer
-  %2 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %src, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %2 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %src, i32 2, <4 x i1> %1, <4 x i16> undef)
   %3 = zext <4 x i16> %2 to <4 x i32>
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %3, ptr %dest, i32 4, <4 x i1> %1)
   ret void
 }
 
-define void @foo_sext_v2i64_v2i32(<2 x i64> *%dest, <2 x i32> *%mask, <2 x i32> *%src) {
+define void @foo_sext_v2i64_v2i32(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LE-LABEL: foo_sext_v2i64_v2i32:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r4, r5, r7, lr}
@@ -202,15 +202,15 @@ define void @foo_sext_v2i64_v2i32(<2 x i64> *%dest, <2 x i32> *%mask, <2 x i32>
 ; CHECK-BE-NEXT:    add sp, #4
 ; CHECK-BE-NEXT:    pop {r4, r5, r7, pc}
 entry:
-  %0 = load <2 x i32>, <2 x i32>* %mask, align 4
+  %0 = load <2 x i32>, ptr %mask, align 4
   %1 = icmp sgt <2 x i32> %0, zeroinitializer
-  %2 = call <2 x i32> @llvm.masked.load.v2i32.p0v2i32(<2 x i32>* %src, i32 4, <2 x i1> %1, <2 x i32> undef)
+  %2 = call <2 x i32> @llvm.masked.load.v2i32.p0(ptr %src, i32 4, <2 x i1> %1, <2 x i32> undef)
   %3 = sext <2 x i32> %2 to <2 x i64>
-  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %3, <2 x i64>* %dest, i32 8, <2 x i1> %1)
+  call void @llvm.masked.store.v2i64.p0(<2 x i64> %3, ptr %dest, i32 8, <2 x i1> %1)
   ret void
 }
 
-define void @foo_sext_v2i64_v2i32_unaligned(<2 x i64> *%dest, <2 x i32> *%mask, <2 x i32> *%src) {
+define void @foo_sext_v2i64_v2i32_unaligned(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LE-LABEL: foo_sext_v2i64_v2i32_unaligned:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r4, r5, r7, lr}
@@ -330,15 +330,15 @@ define void @foo_sext_v2i64_v2i32_unaligned(<2 x i64> *%dest, <2 x i32> *%mask,
 ; CHECK-BE-NEXT:    add sp, #4
 ; CHECK-BE-NEXT:    pop {r4, r5, r7, pc}
 entry:
-  %0 = load <2 x i32>, <2 x i32>* %mask, align 4
+  %0 = load <2 x i32>, ptr %mask, align 4
   %1 = icmp sgt <2 x i32> %0, zeroinitializer
-  %2 = call <2 x i32> @llvm.masked.load.v2i32.p0v2i32(<2 x i32>* %src, i32 2, <2 x i1> %1, <2 x i32> undef)
+  %2 = call <2 x i32> @llvm.masked.load.v2i32.p0(ptr %src, i32 2, <2 x i1> %1, <2 x i32> undef)
   %3 = sext <2 x i32> %2 to <2 x i64>
-  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %3, <2 x i64>* %dest, i32 4, <2 x i1> %1)
+  call void @llvm.masked.store.v2i64.p0(<2 x i64> %3, ptr %dest, i32 4, <2 x i1> %1)
   ret void
 }
 
-define void @foo_zext_v2i64_v2i32(<2 x i64> *%dest, <2 x i32> *%mask, <2 x i32> *%src) {
+define void @foo_zext_v2i64_v2i32(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LE-LABEL: foo_zext_v2i64_v2i32:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r4, lr}
@@ -444,15 +444,15 @@ define void @foo_zext_v2i64_v2i32(<2 x i64> *%dest, <2 x i32> *%mask, <2 x i32>
 ; CHECK-BE-NEXT:    add sp, #4
 ; CHECK-BE-NEXT:    pop {r7, pc}
 entry:
-  %0 = load <2 x i32>, <2 x i32>* %mask, align 4
+  %0 = load <2 x i32>, ptr %mask, align 4
   %1 = icmp sgt <2 x i32> %0, zeroinitializer
-  %2 = call <2 x i32> @llvm.masked.load.v2i32.p0v2i32(<2 x i32>* %src, i32 4, <2 x i1> %1, <2 x i32> undef)
+  %2 = call <2 x i32> @llvm.masked.load.v2i32.p0(ptr %src, i32 4, <2 x i1> %1, <2 x i32> undef)
   %3 = zext <2 x i32> %2 to <2 x i64>
-  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %3, <2 x i64>* %dest, i32 8, <2 x i1> %1)
+  call void @llvm.masked.store.v2i64.p0(<2 x i64> %3, ptr %dest, i32 8, <2 x i1> %1)
   ret void
 }
 
-define void @foo_zext_v2i64_v2i32_unaligned(<2 x i64> *%dest, <2 x i32> *%mask, <2 x i32> *%src) {
+define void @foo_zext_v2i64_v2i32_unaligned(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LE-LABEL: foo_zext_v2i64_v2i32_unaligned:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r4, lr}
@@ -562,15 +562,15 @@ define void @foo_zext_v2i64_v2i32_unaligned(<2 x i64> *%dest, <2 x i32> *%mask,
 ; CHECK-BE-NEXT:    add sp, #4
 ; CHECK-BE-NEXT:    pop {r7, pc}
 entry:
-  %0 = load <2 x i32>, <2 x i32>* %mask, align 4
+  %0 = load <2 x i32>, ptr %mask, align 4
   %1 = icmp sgt <2 x i32> %0, zeroinitializer
-  %2 = call <2 x i32> @llvm.masked.load.v2i32.p0v2i32(<2 x i32>* %src, i32 2, <2 x i1> %1, <2 x i32> undef)
+  %2 = call <2 x i32> @llvm.masked.load.v2i32.p0(ptr %src, i32 2, <2 x i1> %1, <2 x i32> undef)
   %3 = zext <2 x i32> %2 to <2 x i64>
-  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %3, <2 x i64>* %dest, i32 4, <2 x i1> %1)
+  call void @llvm.masked.store.v2i64.p0(<2 x i64> %3, ptr %dest, i32 4, <2 x i1> %1)
   ret void
 }
 
-define void @foo_v8i16_v8i16(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i16> *%src) {
+define void @foo_v8i16_v8i16(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_v8i16_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
@@ -579,14 +579,14 @@ define void @foo_v8i16_v8i16(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i16> *%src
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %mask, align 2
+  %0 = load <8 x i16>, ptr %mask, align 2
   %1 = icmp sgt <8 x i16> %0, zeroinitializer
-  %2 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %src, i32 2, <8 x i1> %1, <8 x i16> undef)
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %2, <8 x i16>* %dest, i32 2, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %src, i32 2, <8 x i1> %1, <8 x i16> undef)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %2, ptr %dest, i32 2, <8 x i1> %1)
   ret void
 }
 
-define void @foo_sext_v8i16_v8i8(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i8> *%src) {
+define void @foo_sext_v8i16_v8i8(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_sext_v8i16_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
@@ -595,15 +595,15 @@ define void @foo_sext_v8i16_v8i8(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i8> *%
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %mask, align 2
+  %0 = load <8 x i16>, ptr %mask, align 2
   %1 = icmp sgt <8 x i16> %0, zeroinitializer
-  %2 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %src, i32 1, <8 x i1> %1, <8 x i8> undef)
+  %2 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %src, i32 1, <8 x i1> %1, <8 x i8> undef)
   %3 = sext <8 x i8> %2 to <8 x i16>
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %3, <8 x i16>* %dest, i32 2, <8 x i1> %1)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %3, ptr %dest, i32 2, <8 x i1> %1)
   ret void
 }
 
-define void @foo_zext_v8i16_v8i8(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i8> *%src) {
+define void @foo_zext_v8i16_v8i8(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_zext_v8i16_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
@@ -612,15 +612,15 @@ define void @foo_zext_v8i16_v8i8(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i8> *%
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %mask, align 2
+  %0 = load <8 x i16>, ptr %mask, align 2
   %1 = icmp sgt <8 x i16> %0, zeroinitializer
-  %2 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %src, i32 1, <8 x i1> %1, <8 x i8> undef)
+  %2 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %src, i32 1, <8 x i1> %1, <8 x i8> undef)
   %3 = zext <8 x i8> %2 to <8 x i16>
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %3, <8 x i16>* %dest, i32 2, <8 x i1> %1)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %3, ptr %dest, i32 2, <8 x i1> %1)
   ret void
 }
 
-define void @foo_v16i8_v16i8(<16 x i8> *%dest, <16 x i8> *%mask, <16 x i8> *%src) {
+define void @foo_v16i8_v16i8(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_v16i8_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r1]
@@ -629,14 +629,14 @@ define void @foo_v16i8_v16i8(<16 x i8> *%dest, <16 x i8> *%mask, <16 x i8> *%src
 ; CHECK-NEXT:    vstrbt.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %mask, align 1
+  %0 = load <16 x i8>, ptr %mask, align 1
   %1 = icmp sgt <16 x i8> %0, zeroinitializer
-  %2 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %src, i32 1, <16 x i1> %1, <16 x i8> undef)
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %2, <16 x i8>* %dest, i32 1, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %src, i32 1, <16 x i1> %1, <16 x i8> undef)
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %2, ptr %dest, i32 1, <16 x i1> %1)
   ret void
 }
 
-define void @foo_trunc_v8i8_v8i16(<8 x i8> *%dest, <8 x i16> *%mask, <8 x i16> *%src) {
+define void @foo_trunc_v8i8_v8i16(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_trunc_v8i8_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
@@ -645,15 +645,15 @@ define void @foo_trunc_v8i8_v8i16(<8 x i8> *%dest, <8 x i16> *%mask, <8 x i16> *
 ; CHECK-NEXT:    vstrbt.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %mask, align 2
+  %0 = load <8 x i16>, ptr %mask, align 2
   %1 = icmp sgt <8 x i16> %0, zeroinitializer
-  %2 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %src, i32 2, <8 x i1> %1, <8 x i16> undef)
+  %2 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %src, i32 2, <8 x i1> %1, <8 x i16> undef)
   %3 = trunc <8 x i16> %2 to <8 x i8>
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %3, <8 x i8>* %dest, i32 1, <8 x i1> %1)
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %3, ptr %dest, i32 1, <8 x i1> %1)
   ret void
 }
 
-define void @foo_trunc_v4i8_v4i32(<4 x i8> *%dest, <4 x i32> *%mask, <4 x i32> *%src) {
+define void @foo_trunc_v4i8_v4i32(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_trunc_v4i8_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -662,15 +662,15 @@ define void @foo_trunc_v4i8_v4i32(<4 x i8> *%dest, <4 x i32> *%mask, <4 x i32> *
 ; CHECK-NEXT:    vstrbt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %0 = load <4 x i32>, ptr %mask, align 4
   %1 = icmp sgt <4 x i32> %0, zeroinitializer
-  %2 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %src, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %2 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %src, i32 4, <4 x i1> %1, <4 x i32> undef)
   %3 = trunc <4 x i32> %2 to <4 x i8>
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %3, <4 x i8>* %dest, i32 1, <4 x i1> %1)
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %3, ptr %dest, i32 1, <4 x i1> %1)
   ret void
 }
 
-define void @foo_trunc_v4i16_v4i32(<4 x i16> *%dest, <4 x i32> *%mask, <4 x i32> *%src) {
+define void @foo_trunc_v4i16_v4i32(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_trunc_v4i16_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -679,15 +679,15 @@ define void @foo_trunc_v4i16_v4i32(<4 x i16> *%dest, <4 x i32> *%mask, <4 x i32>
 ; CHECK-NEXT:    vstrht.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %0 = load <4 x i32>, ptr %mask, align 4
   %1 = icmp sgt <4 x i32> %0, zeroinitializer
-  %2 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %src, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %2 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %src, i32 4, <4 x i1> %1, <4 x i32> undef)
   %3 = trunc <4 x i32> %2 to <4 x i16>
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %3, <4 x i16>* %dest, i32 2, <4 x i1> %1)
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %3, ptr %dest, i32 2, <4 x i1> %1)
   ret void
 }
 
-define void @foo_v4f32_v4f32(<4 x float> *%dest, <4 x i32> *%mask, <4 x float> *%src) {
+define void @foo_v4f32_v4f32(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_v4f32_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -696,14 +696,14 @@ define void @foo_v4f32_v4f32(<4 x float> *%dest, <4 x i32> *%mask, <4 x float> *
 ; CHECK-NEXT:    vstrwt.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %0 = load <4 x i32>, ptr %mask, align 4
   %1 = icmp sgt <4 x i32> %0, zeroinitializer
-  %2 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %src, i32 4, <4 x i1> %1, <4 x float> undef)
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %2, <4 x float>* %dest, i32 4, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %src, i32 4, <4 x i1> %1, <4 x float> undef)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %2, ptr %dest, i32 4, <4 x i1> %1)
   ret void
 }
 
-define void @foo_v8f16_v8f16(<8 x half> *%dest, <8 x i16> *%mask, <8 x half> *%src) {
+define void @foo_v8f16_v8f16(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LABEL: foo_v8f16_v8f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
@@ -712,14 +712,14 @@ define void @foo_v8f16_v8f16(<8 x half> *%dest, <8 x i16> *%mask, <8 x half> *%s
 ; CHECK-NEXT:    vstrht.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %mask, align 2
+  %0 = load <8 x i16>, ptr %mask, align 2
   %1 = icmp sgt <8 x i16> %0, zeroinitializer
-  %2 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %src, i32 2, <8 x i1> %1, <8 x half> undef)
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %2, <8 x half>* %dest, i32 2, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %src, i32 2, <8 x i1> %1, <8 x half> undef)
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %2, ptr %dest, i32 2, <8 x i1> %1)
   ret void
 }
 
-define void @foo_v4f32_v4f16(<4 x float> *%dest, <4 x i16> *%mask, <4 x half> *%src) {
+define void @foo_v4f32_v4f16(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LE-LABEL: foo_v4f32_v4f16:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r7, lr}
@@ -902,15 +902,15 @@ define void @foo_v4f32_v4f16(<4 x float> *%dest, <4 x i16> *%mask, <4 x half> *%
 ; CHECK-BE-NEXT:    bne .LBB18_4
 ; CHECK-BE-NEXT:    b .LBB18_5
 entry:
-  %0 = load <4 x i16>, <4 x i16>* %mask, align 2
+  %0 = load <4 x i16>, ptr %mask, align 2
   %1 = icmp sgt <4 x i16> %0, zeroinitializer
-  %2 = call <4 x half> @llvm.masked.load.v4f16.p0v4f16(<4 x half>* %src, i32 2, <4 x i1> %1, <4 x half> undef)
+  %2 = call <4 x half> @llvm.masked.load.v4f16.p0(ptr %src, i32 2, <4 x i1> %1, <4 x half> undef)
   %3 = fpext <4 x half> %2 to <4 x float>
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %3, <4 x float>* %dest, i32 2, <4 x i1> %1)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %dest, i32 2, <4 x i1> %1)
   ret void
 }
 
-define void @foo_v4f32_v4f16_unaligned(<4 x float> *%dest, <4 x i16> *%mask, <4 x half> *%src) {
+define void @foo_v4f32_v4f16_unaligned(ptr %dest, ptr %mask, ptr %src) {
 ; CHECK-LE-LABEL: foo_v4f32_v4f16_unaligned:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r7, lr}
@@ -1093,31 +1093,31 @@ define void @foo_v4f32_v4f16_unaligned(<4 x float> *%dest, <4 x i16> *%mask, <4
 ; CHECK-BE-NEXT:    bne .LBB19_4
 ; CHECK-BE-NEXT:    b .LBB19_5
 entry:
-  %0 = load <4 x i16>, <4 x i16>* %mask, align 2
+  %0 = load <4 x i16>, ptr %mask, align 2
   %1 = icmp sgt <4 x i16> %0, zeroinitializer
-  %2 = call <4 x half> @llvm.masked.load.v4f16.p0v4f16(<4 x half>* %src, i32 2, <4 x i1> %1, <4 x half> undef)
+  %2 = call <4 x half> @llvm.masked.load.v4f16.p0(ptr %src, i32 2, <4 x i1> %1, <4 x half> undef)
   %3 = fpext <4 x half> %2 to <4 x float>
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %3, <4 x float>* %dest, i32 1, <4 x i1> %1)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %dest, i32 1, <4 x i1> %1)
   ret void
 }
 
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
-declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
-declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
-declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
-declare <2 x i32> @llvm.masked.load.v2i32.p0v2i32(<2 x i32>*, i32, <2 x i1>, <2 x i32>)
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
-declare <4 x half> @llvm.masked.load.v4f16.p0v4f16(<4 x half>*, i32, <4 x i1>, <4 x half>)
-declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32, <16 x i1>)
+declare void @llvm.masked.store.v8f16.p0(<8 x half>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32, <4 x i1>)
+declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32, <16 x i1>, <16 x i8>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32, <8 x i1>, <8 x i16>)
+declare <2 x i32> @llvm.masked.load.v2i32.p0(ptr, i32, <2 x i1>, <2 x i32>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32, <4 x i1>, <4 x i32>)
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32, <4 x i1>, <4 x float>)
+declare <4 x half> @llvm.masked.load.v4f16.p0(ptr, i32, <4 x i1>, <4 x half>)
+declare <8 x half> @llvm.masked.load.v8f16.p0(ptr, i32, <8 x i1>, <8 x half>)
 
-declare void @llvm.masked.store.v8i8.p0v8i8(<8 x i8>, <8 x i8>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v4i8.p0v4i8(<4 x i8>, <4 x i8>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v4i16.p0v4i16(<4 x i16>, <4 x i16>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v2i64.p0v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>)
-declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32, <4 x i1>, <4 x i16>)
-declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32, <4 x i1>, <4 x i8>)
-declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)
+declare void @llvm.masked.store.v8i8.p0(<8 x i8>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i8.p0(<4 x i8>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v4i16.p0(<4 x i16>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v2i64.p0(<2 x i64>, ptr, i32, <2 x i1>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32, <4 x i1>, <4 x i16>)
+declare <4 x i8> @llvm.masked.load.v4i8.p0(ptr, i32, <4 x i1>, <4 x i8>)
+declare <8 x i8> @llvm.masked.load.v8i8.p0(ptr, i32, <8 x i1>, <8 x i8>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-load.ll b/llvm/test/CodeGen/Thumb2/mve-masked-load.ll
index f250731ea6a7c..b0a3a6354daa7 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-load.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-load.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-LE
 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-BE
 
-define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_zero(<4 x i32> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_zero(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4i32_align4_zero:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -18,11 +18,11 @@ define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_zero(<4 x i32> *%dest, <4
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> zeroinitializer)
+  %l = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %dest, i32 4, <4 x i1> %c, <4 x i32> zeroinitializer)
   ret <4 x i32> %l
 }
 
-define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_undef(<4 x i32> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_undef(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4i32_align4_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -38,11 +38,11 @@ define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_undef(<4 x i32> *%dest, <4
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> undef)
+  %l = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %dest, i32 4, <4 x i1> %c, <4 x i32> undef)
   ret <4 x i32> %l
 }
 
-define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align1_undef(<4 x i32> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align1_undef(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4i32_align1_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #4
@@ -124,11 +124,11 @@ define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align1_undef(<4 x i32> *%dest, <4
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 1, <4 x i1> %c, <4 x i32> undef)
+  %l = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %dest, i32 1, <4 x i1> %c, <4 x i32> undef)
   ret <4 x i32> %l
 }
 
-define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_other(<4 x i32> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_other(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4i32_align4_other:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -146,11 +146,11 @@ define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_other(<4 x i32> *%dest, <4
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> %a)
+  %l = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %dest, i32 4, <4 x i1> %c, <4 x i32> %a)
   ret <4 x i32> %l
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_zero(<4 x i16> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_zero(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: zext16_masked_v4i32_align2_zero:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -166,12 +166,12 @@ define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_zero(<4 x i16> *%de
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
+  %l = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
   %ext = zext <4 x i16> %l to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_undef(<4 x i16> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_undef(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: zext16_masked_v4i32_align2_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -187,12 +187,12 @@ define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_undef(<4 x i16> *%d
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %l = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %dest, i32 2, <4 x i1> %c, <4 x i16> undef)
   %ext = zext <4 x i16> %l to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align1_undef(<4 x i16> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align1_undef(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: zext16_masked_v4i32_align1_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #4
@@ -276,12 +276,12 @@ define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align1_undef(<4 x i16> *%d
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 1, <4 x i1> %c, <4 x i16> undef)
+  %l = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %dest, i32 1, <4 x i1> %c, <4 x i16> undef)
   %ext = sext <4 x i16> %l to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_other(<4 x i16> *%dest, <4 x i16> %a) {
+define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_other(ptr %dest, <4 x i16> %a) {
 ; CHECK-LE-LABEL: zext16_masked_v4i32_align2_other:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmovlb.u16 q1, q0
@@ -303,12 +303,12 @@ define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_other(<4 x i16> *%d
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i16> %a, zeroinitializer
-  %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> %a)
+  %l = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %dest, i32 2, <4 x i1> %c, <4 x i16> %a)
   %ext = zext <4 x i16> %l to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_zero(<4 x i16> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_zero(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: sext16_masked_v4i32_align2_zero:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -324,12 +324,12 @@ define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_zero(<4 x i16> *%de
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
+  %l = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
   %sext = sext <4 x i16> %l to <4 x i32>
   ret <4 x i32> %sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_undef(<4 x i16> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_undef(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: sext16_masked_v4i32_align2_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -345,12 +345,12 @@ define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_undef(<4 x i16> *%d
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> undef)
+  %l = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %dest, i32 2, <4 x i1> %c, <4 x i16> undef)
   %sext = sext <4 x i16> %l to <4 x i32>
   ret <4 x i32> %sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align1_undef(<4 x i16> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align1_undef(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: sext16_masked_v4i32_align1_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #4
@@ -434,12 +434,12 @@ define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align1_undef(<4 x i16> *%d
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 1, <4 x i1> %c, <4 x i16> undef)
+  %l = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %dest, i32 1, <4 x i1> %c, <4 x i16> undef)
   %sext = sext <4 x i16> %l to <4 x i32>
   ret <4 x i32> %sext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_other(<4 x i16> *%dest, <4 x i16> %a) {
+define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_other(ptr %dest, <4 x i16> %a) {
 ; CHECK-LE-LABEL: sext16_masked_v4i32_align2_other:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmovlb.s16 q0, q0
@@ -459,12 +459,12 @@ define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_other(<4 x i16> *%d
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i16> %a, zeroinitializer
-  %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> %a)
+  %l = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %dest, i32 2, <4 x i1> %c, <4 x i16> %a)
   %sext = sext <4 x i16> %l to <4 x i32>
   ret <4 x i32> %sext
 }
 
-define arm_aapcs_vfpcc i8* @masked_v4i32_preinc(i8* %x, i8* %y, <4 x i32> %a) {
+define arm_aapcs_vfpcc ptr @masked_v4i32_preinc(ptr %x, ptr %y, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4i32_preinc:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -480,16 +480,14 @@ define arm_aapcs_vfpcc i8* @masked_v4i32_preinc(i8* %x, i8* %y, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x i32>*
+  %z = getelementptr inbounds i8, ptr %x, i32 4
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define arm_aapcs_vfpcc i8* @masked_v4i32_postinc(i8* %x, i8* %y, <4 x i32> %a) {
+define arm_aapcs_vfpcc ptr @masked_v4i32_postinc(ptr %x, ptr %y, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4i32_postinc:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -505,16 +503,14 @@ define arm_aapcs_vfpcc i8* @masked_v4i32_postinc(i8* %x, i8* %y, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x i32>*
+  %z = getelementptr inbounds i8, ptr %x, i32 4
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
-  %2 = bitcast i8* %y to <4 x i32>*
-  store <4 x i32> %1, <4 x i32>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x i32> undef)
+  store <4 x i32> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_zero(<8 x i16> *%dest, <8 x i16> %a) {
+define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_zero(ptr %dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16_align4_zero:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s16 gt, q0, zr
@@ -530,11 +526,11 @@ define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_zero(<8 x i16> *%dest, <8
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> zeroinitializer)
+  %l = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %dest, i32 2, <8 x i1> %c, <8 x i16> zeroinitializer)
   ret <8 x i16> %l
 }
 
-define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align2_undef(<8 x i16> *%dest, <8 x i16> %a) {
+define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align2_undef(ptr %dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16_align2_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s16 gt, q0, zr
@@ -550,11 +546,11 @@ define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align2_undef(<8 x i16> *%dest, <8
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> undef)
+  %l = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %dest, i32 2, <8 x i1> %c, <8 x i16> undef)
   ret <8 x i16> %l
 }
 
-define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align1_undef(<8 x i16> *%dest, <8 x i16> %a) {
+define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align1_undef(ptr %dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16_align1_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #4
@@ -694,11 +690,11 @@ define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align1_undef(<8 x i16> *%dest, <8
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 1, <8 x i1> %c, <8 x i16> undef)
+  %l = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %dest, i32 1, <8 x i1> %c, <8 x i16> undef)
   ret <8 x i16> %l
 }
 
-define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_other(<8 x i16> *%dest, <8 x i16> %a) {
+define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_other(ptr %dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16_align4_other:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s16 gt, q0, zr
@@ -716,11 +712,11 @@ define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_other(<8 x i16> *%dest, <8
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> %a)
+  %l = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %dest, i32 2, <8 x i1> %c, <8 x i16> %a)
   ret <8 x i16> %l
 }
 
-define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_zero(<8 x i8> *%dest, <8 x i8> %a) {
+define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_zero(ptr %dest, <8 x i8> %a) {
 ; CHECK-LE-LABEL: sext8_masked_v8i16_align1_zero:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmovlb.s8 q0, q0
@@ -738,12 +734,12 @@ define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_zero(<8 x i8> *%dest
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i8> %a, zeroinitializer
-  %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> zeroinitializer)
+  %l = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %dest, i32 1, <8 x i1> %c, <8 x i8> zeroinitializer)
   %ext = sext <8 x i8> %l to <8 x i16>
   ret <8 x i16> %ext
 }
 
-define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_undef(<8 x i8> *%dest, <8 x i8> %a) {
+define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_undef(ptr %dest, <8 x i8> %a) {
 ; CHECK-LE-LABEL: sext8_masked_v8i16_align1_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmovlb.s8 q0, q0
@@ -761,12 +757,12 @@ define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_undef(<8 x i8> *%des
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i8> %a, zeroinitializer
-  %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %l = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %dest, i32 1, <8 x i1> %c, <8 x i8> undef)
   %ext = sext <8 x i8> %l to <8 x i16>
   ret <8 x i16> %ext
 }
 
-define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_other(<8 x i8> *%dest, <8 x i8> %a) {
+define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_other(ptr %dest, <8 x i8> %a) {
 ; CHECK-LE-LABEL: sext8_masked_v8i16_align1_other:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmovlb.s8 q0, q0
@@ -786,12 +782,12 @@ define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_other(<8 x i8> *%des
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i8> %a, zeroinitializer
-  %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> %a)
+  %l = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %dest, i32 1, <8 x i1> %c, <8 x i8> %a)
   %ext = sext <8 x i8> %l to <8 x i16>
   ret <8 x i16> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_zero(<4 x i8> *%dest, <4 x i8> %a) {
+define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_zero(ptr %dest, <4 x i8> %a) {
 ; CHECK-LE-LABEL: sext8_masked_v4i32_align1_zero:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmovlb.s8 q0, q0
@@ -811,12 +807,12 @@ define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_zero(<4 x i8> *%dest
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i8> %a, zeroinitializer
-  %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> zeroinitializer)
+  %l = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %dest, i32 1, <4 x i1> %c, <4 x i8> zeroinitializer)
   %ext = sext <4 x i8> %l to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_undef(<4 x i8> *%dest, <4 x i8> %a) {
+define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_undef(ptr %dest, <4 x i8> %a) {
 ; CHECK-LE-LABEL: sext8_masked_v4i32_align1_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmovlb.s8 q0, q0
@@ -836,12 +832,12 @@ define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_undef(<4 x i8> *%des
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i8> %a, zeroinitializer
-  %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %l = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %dest, i32 1, <4 x i1> %c, <4 x i8> undef)
   %ext = sext <4 x i8> %l to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_other(<4 x i8> *%dest, <4 x i8> %a) {
+define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_other(ptr %dest, <4 x i8> %a) {
 ; CHECK-LE-LABEL: sext8_masked_v4i32_align1_other:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmovlb.s8 q0, q0
@@ -863,12 +859,12 @@ define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_other(<4 x i8> *%des
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i8> %a, zeroinitializer
-  %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> %a)
+  %l = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %dest, i32 1, <4 x i1> %c, <4 x i8> %a)
   %ext = sext <4 x i8> %l to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_zero(<4 x i8> *%dest, <4 x i8> %a) {
+define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_zero(ptr %dest, <4 x i8> %a) {
 ; CHECK-LE-LABEL: zext8_masked_v4i32_align1_zero:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmovlb.s8 q0, q0
@@ -888,12 +884,12 @@ define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_zero(<4 x i8> *%dest
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i8> %a, zeroinitializer
-  %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> zeroinitializer)
+  %l = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %dest, i32 1, <4 x i1> %c, <4 x i8> zeroinitializer)
   %ext = zext <4 x i8> %l to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_undef(<4 x i8> *%dest, <4 x i8> %a) {
+define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_undef(ptr %dest, <4 x i8> %a) {
 ; CHECK-LE-LABEL: zext8_masked_v4i32_align1_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmovlb.s8 q0, q0
@@ -913,12 +909,12 @@ define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_undef(<4 x i8> *%des
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i8> %a, zeroinitializer
-  %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> undef)
+  %l = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %dest, i32 1, <4 x i1> %c, <4 x i8> undef)
   %ext = zext <4 x i8> %l to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_other(<4 x i8> *%dest, <4 x i8> %a) {
+define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_other(ptr %dest, <4 x i8> %a) {
 ; CHECK-LE-LABEL: zext8_masked_v4i32_align1_other:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmov.i32 q1, #0xff
@@ -944,12 +940,12 @@ define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_other(<4 x i8> *%des
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i8> %a, zeroinitializer
-  %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> %a)
+  %l = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %dest, i32 1, <4 x i1> %c, <4 x i8> %a)
   %ext = zext <4 x i8> %l to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_zero(<8 x i8> *%dest, <8 x i8> %a) {
+define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_zero(ptr %dest, <8 x i8> %a) {
 ; CHECK-LE-LABEL: zext8_masked_v8i16_align1_zero:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmovlb.s8 q0, q0
@@ -967,12 +963,12 @@ define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_zero(<8 x i8> *%dest
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i8> %a, zeroinitializer
-  %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> zeroinitializer)
+  %l = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %dest, i32 1, <8 x i1> %c, <8 x i8> zeroinitializer)
   %ext = zext <8 x i8> %l to <8 x i16>
   ret <8 x i16> %ext
 }
 
-define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_undef(<8 x i8> *%dest, <8 x i8> %a) {
+define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_undef(ptr %dest, <8 x i8> %a) {
 ; CHECK-LE-LABEL: zext8_masked_v8i16_align1_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmovlb.s8 q0, q0
@@ -990,12 +986,12 @@ define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_undef(<8 x i8> *%des
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i8> %a, zeroinitializer
-  %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> undef)
+  %l = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %dest, i32 1, <8 x i1> %c, <8 x i8> undef)
   %ext = zext <8 x i8> %l to <8 x i16>
   ret <8 x i16> %ext
 }
 
-define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_other(<8 x i8> *%dest, <8 x i8> %a) {
+define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_other(ptr %dest, <8 x i8> %a) {
 ; CHECK-LE-LABEL: zext8_masked_v8i16_align1_other:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmovlb.u8 q1, q0
@@ -1017,12 +1013,12 @@ define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_other(<8 x i8> *%des
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i8> %a, zeroinitializer
-  %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> %a)
+  %l = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %dest, i32 1, <8 x i1> %c, <8 x i8> %a)
   %ext = zext <8 x i8> %l to <8 x i16>
   ret <8 x i16> %ext
 }
 
-define i8* @masked_v8i16_preinc(i8* %x, i8* %y, <8 x i16> %a) {
+define ptr @masked_v8i16_preinc(ptr %x, ptr %y, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16_preinc:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldr d1, [sp]
@@ -1042,16 +1038,14 @@ define i8* @masked_v8i16_preinc(i8* %x, i8* %y, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x i16>*
+  %z = getelementptr inbounds i8, ptr %x, i32 4
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 4, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 4
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %z, i32 4, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define arm_aapcs_vfpcc i8* @masked_v8i16_postinc(i8* %x, i8* %y, <8 x i16> %a) {
+define arm_aapcs_vfpcc ptr @masked_v8i16_postinc(ptr %x, ptr %y, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16_postinc:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s16 gt, q0, zr
@@ -1067,17 +1061,15 @@ define arm_aapcs_vfpcc i8* @masked_v8i16_postinc(i8* %x, i8* %y, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <8 x i16>*
+  %z = getelementptr inbounds i8, ptr %x, i32 4
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 4, <8 x i1> %c, <8 x i16> undef)
-  %2 = bitcast i8* %y to <8 x i16>*
-  store <8 x i16> %1, <8 x i16>* %2, align 4
-  ret i8* %z
+  %0 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %x, i32 4, <8 x i1> %c, <8 x i16> undef)
+  store <8 x i16> %0, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_zero(<16 x i8> *%dest, <16 x i8> %a) {
+define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_zero(ptr %dest, <16 x i8> %a) {
 ; CHECK-LE-LABEL: masked_v16i8_align4_zero:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s8 gt, q0, zr
@@ -1093,11 +1085,11 @@ define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_zero(<16 x i8> *%dest, <16
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <16 x i8> %a, zeroinitializer
-  %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> zeroinitializer)
+  %l = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %dest, i32 1, <16 x i1> %c, <16 x i8> zeroinitializer)
   ret <16 x i8> %l
 }
 
-define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_undef(<16 x i8> *%dest, <16 x i8> %a) {
+define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_undef(ptr %dest, <16 x i8> %a) {
 ; CHECK-LE-LABEL: masked_v16i8_align4_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s8 gt, q0, zr
@@ -1113,11 +1105,11 @@ define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_undef(<16 x i8> *%dest, <1
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <16 x i8> %a, zeroinitializer
-  %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> undef)
+  %l = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %dest, i32 1, <16 x i1> %c, <16 x i8> undef)
   ret <16 x i8> %l
 }
 
-define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_other(<16 x i8> *%dest, <16 x i8> %a) {
+define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_other(ptr %dest, <16 x i8> %a) {
 ; CHECK-LE-LABEL: masked_v16i8_align4_other:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s8 gt, q0, zr
@@ -1135,11 +1127,11 @@ define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_other(<16 x i8> *%dest, <1
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <16 x i8> %a, zeroinitializer
-  %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> %a)
+  %l = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %dest, i32 1, <16 x i1> %c, <16 x i8> %a)
   ret <16 x i8> %l
 }
 
-define arm_aapcs_vfpcc i8* @masked_v16i8_preinc(i8* %x, i8* %y, <16 x i8> %a) {
+define arm_aapcs_vfpcc ptr @masked_v16i8_preinc(ptr %x, ptr %y, <16 x i8> %a) {
 ; CHECK-LE-LABEL: masked_v16i8_preinc:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s8 gt, q0, zr
@@ -1155,16 +1147,14 @@ define arm_aapcs_vfpcc i8* @masked_v16i8_preinc(i8* %x, i8* %y, <16 x i8> %a) {
 ; CHECK-BE-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <16 x i8>*
+  %z = getelementptr inbounds i8, ptr %x, i32 4
   %c = icmp sgt <16 x i8> %a, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 4, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 4
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %z, i32 4, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define arm_aapcs_vfpcc i8* @masked_v16i8_postinc(i8* %x, i8* %y, <16 x i8> %a) {
+define arm_aapcs_vfpcc ptr @masked_v16i8_postinc(ptr %x, ptr %y, <16 x i8> %a) {
 ; CHECK-LE-LABEL: masked_v16i8_postinc:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s8 gt, q0, zr
@@ -1180,17 +1170,15 @@ define arm_aapcs_vfpcc i8* @masked_v16i8_postinc(i8* %x, i8* %y, <16 x i8> %a) {
 ; CHECK-BE-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <16 x i8>*
+  %z = getelementptr inbounds i8, ptr %x, i32 4
   %c = icmp sgt <16 x i8> %a, zeroinitializer
-  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 4, <16 x i1> %c, <16 x i8> undef)
-  %2 = bitcast i8* %y to <16 x i8>*
-  store <16 x i8> %1, <16 x i8>* %2, align 4
-  ret i8* %z
+  %0 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %x, i32 4, <16 x i1> %c, <16 x i8> undef)
+  store <16 x i8> %0, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_zero(<4 x float> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_zero(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4f32_align4_zero:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -1206,11 +1194,11 @@ define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_zero(<4 x float> *%dest,
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> zeroinitializer)
+  %l = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %dest, i32 4, <4 x i1> %c, <4 x float> zeroinitializer)
   ret <4 x float> %l
 }
 
-define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_undef(<4 x float> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_undef(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4f32_align4_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -1226,11 +1214,11 @@ define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_undef(<4 x float> *%dest
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> undef)
+  %l = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %dest, i32 4, <4 x i1> %c, <4 x float> undef)
   ret <4 x float> %l
 }
 
-define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align1_undef(<4 x float> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align1_undef(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4f32_align1_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #4
@@ -1312,11 +1300,11 @@ define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align1_undef(<4 x float> *%dest
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 1, <4 x i1> %c, <4 x float> undef)
+  %l = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %dest, i32 1, <4 x i1> %c, <4 x float> undef)
   ret <4 x float> %l
 }
 
-define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_other(<4 x float> *%dest, <4 x i32> %a, <4 x float> %b) {
+define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_other(ptr %dest, <4 x i32> %a, <4 x float> %b) {
 ; CHECK-LE-LABEL: masked_v4f32_align4_other:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -1335,11 +1323,11 @@ define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_other(<4 x float> *%dest
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> %b)
+  %l = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %dest, i32 4, <4 x i1> %c, <4 x float> %b)
   ret <4 x float> %l
 }
 
-define arm_aapcs_vfpcc i8* @masked_v4f32_preinc(i8* %x, i8* %y, <4 x i32> %a) {
+define arm_aapcs_vfpcc ptr @masked_v4f32_preinc(ptr %x, ptr %y, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4f32_preinc:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -1355,16 +1343,14 @@ define arm_aapcs_vfpcc i8* @masked_v4f32_preinc(i8* %x, i8* %y, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <4 x float>*
+  %z = getelementptr inbounds i8, ptr %x, i32 4
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %z, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define arm_aapcs_vfpcc i8* @masked_v4f32_postinc(i8* %x, i8* %y, <4 x i32> %a) {
+define arm_aapcs_vfpcc ptr @masked_v4f32_postinc(ptr %x, ptr %y, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4f32_postinc:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -1380,17 +1366,15 @@ define arm_aapcs_vfpcc i8* @masked_v4f32_postinc(i8* %x, i8* %y, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <4 x float>*
+  %z = getelementptr inbounds i8, ptr %x, i32 4
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
-  %2 = bitcast i8* %y to <4 x float>*
-  store <4 x float> %1, <4 x float>* %2, align 4
-  ret i8* %z
+  %0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %x, i32 4, <4 x i1> %c, <4 x float> undef)
+  store <4 x float> %0, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_zero(<8 x half> *%dest, <8 x i16> %a) {
+define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_zero(ptr %dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8f16_align4_zero:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s16 gt, q0, zr
@@ -1406,11 +1390,11 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_zero(<8 x half> *%dest, <
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> zeroinitializer)
+  %l = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %dest, i32 2, <8 x i1> %c, <8 x half> zeroinitializer)
   ret <8 x half> %l
 }
 
-define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_undef(<8 x half> *%dest, <8 x i16> %a) {
+define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_undef(ptr %dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8f16_align4_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s16 gt, q0, zr
@@ -1426,11 +1410,11 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_undef(<8 x half> *%dest,
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> undef)
+  %l = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %dest, i32 2, <8 x i1> %c, <8 x half> undef)
   ret <8 x half> %l
 }
 
-define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest, <8 x i16> %a) {
+define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(ptr %dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8f16_align1_undef:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #36
@@ -1669,11 +1653,11 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest,
 ; CHECK-BE-NEXT:    b .LBB45_9
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 1, <8 x i1> %c, <8 x half> undef)
+  %l = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %dest, i32 1, <8 x i1> %c, <8 x half> undef)
   ret <8 x half> %l
 }
 
-define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_other(<8 x half> *%dest, <8 x i16> %a, <8 x half> %b) {
+define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_other(ptr %dest, <8 x i16> %a, <8 x half> %b) {
 ; CHECK-LE-LABEL: masked_v8f16_align4_other:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s16 gt, q0, zr
@@ -1692,11 +1676,11 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_other(<8 x half> *%dest,
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> %b)
+  %l = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %dest, i32 2, <8 x i1> %c, <8 x half> %b)
   ret <8 x half> %l
 }
 
-define arm_aapcs_vfpcc i8* @masked_v8f16_preinc(i8* %x, i8* %y, <8 x i16> %a) {
+define arm_aapcs_vfpcc ptr @masked_v8f16_preinc(ptr %x, ptr %y, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8f16_preinc:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s16 gt, q0, zr
@@ -1712,16 +1696,14 @@ define arm_aapcs_vfpcc i8* @masked_v8f16_preinc(i8* %x, i8* %y, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %z to <8 x half>*
+  %z = getelementptr inbounds i8, ptr %x, i32 4
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 4, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 4
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %z, i32 4, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 4
+  ret ptr %z
 }
 
-define arm_aapcs_vfpcc i8* @masked_v8f16_postinc(i8* %x, i8* %y, <8 x i16> %a) {
+define arm_aapcs_vfpcc ptr @masked_v8f16_postinc(ptr %x, ptr %y, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8f16_postinc:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s16 gt, q0, zr
@@ -1737,17 +1719,15 @@ define arm_aapcs_vfpcc i8* @masked_v8f16_postinc(i8* %x, i8* %y, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %x, i32 4
-  %0 = bitcast i8* %x to <8 x half>*
+  %z = getelementptr inbounds i8, ptr %x, i32 4
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 4, <8 x i1> %c, <8 x half> undef)
-  %2 = bitcast i8* %y to <8 x half>*
-  store <8 x half> %1, <8 x half>* %2, align 4
-  ret i8* %z
+  %0 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %x, i32 4, <8 x i1> %c, <8 x half> undef)
+  store <8 x half> %0, ptr %y, align 4
+  ret ptr %z
 }
 
 
-define arm_aapcs_vfpcc <2 x i64> @masked_v2i64_align4_zero(<2 x i64> *%dest, <2 x i64> %a) {
+define arm_aapcs_vfpcc <2 x i64> @masked_v2i64_align4_zero(ptr %dest, <2 x i64> %a) {
 ; CHECK-LE-LABEL: masked_v2i64_align4_zero:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r7, lr}
@@ -1824,11 +1804,11 @@ define arm_aapcs_vfpcc <2 x i64> @masked_v2i64_align4_zero(<2 x i64> *%dest, <2
 ; CHECK-BE-NEXT:    .long 0
 entry:
   %c = icmp sgt <2 x i64> %a, zeroinitializer
-  %l = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %dest, i32 8, <2 x i1> %c, <2 x i64> zeroinitializer)
+  %l = call <2 x i64> @llvm.masked.load.v2i64.p0(ptr %dest, i32 8, <2 x i1> %c, <2 x i64> zeroinitializer)
   ret <2 x i64> %l
 }
 
-define arm_aapcs_vfpcc <2 x double> @masked_v2f64_align4_zero(<2 x double> *%dest, <2 x double> %a, <2 x i64> %b) {
+define arm_aapcs_vfpcc <2 x double> @masked_v2f64_align4_zero(ptr %dest, <2 x double> %a, <2 x i64> %b) {
 ; CHECK-LE-LABEL: masked_v2f64_align4_zero:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r7, lr}
@@ -1905,11 +1885,11 @@ define arm_aapcs_vfpcc <2 x double> @masked_v2f64_align4_zero(<2 x double> *%des
 ; CHECK-BE-NEXT:    .long 0
 entry:
   %c = icmp sgt <2 x i64> %b, zeroinitializer
-  %l = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %dest, i32 8, <2 x i1> %c, <2 x double> zeroinitializer)
+  %l = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %dest, i32 8, <2 x i1> %c, <2 x double> zeroinitializer)
   ret <2 x double> %l
 }
 
-define arm_aapcs_vfpcc <4 x i16> @anyext_v4i16(<4 x i16> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i16> @anyext_v4i16(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: anyext_v4i16:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -1925,11 +1905,11 @@ define arm_aapcs_vfpcc <4 x i16> @anyext_v4i16(<4 x i16> *%dest, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
+  %l = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
   ret <4 x i16> %l
 }
 
-define arm_aapcs_vfpcc <4 x i16> @anyext_v4i16_align1(<4 x i16> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i16> @anyext_v4i16_align1(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: anyext_v4i16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #4
@@ -2023,11 +2003,11 @@ define arm_aapcs_vfpcc <4 x i16> @anyext_v4i16_align1(<4 x i16> *%dest, <4 x i32
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 1, <4 x i1> %c, <4 x i16> zeroinitializer)
+  %l = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %dest, i32 1, <4 x i1> %c, <4 x i16> zeroinitializer)
   ret <4 x i16> %l
 }
 
-define arm_aapcs_vfpcc <4 x i8> @anyext_v4i8(<4 x i8> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i8> @anyext_v4i8(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: anyext_v4i8:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -2043,11 +2023,11 @@ define arm_aapcs_vfpcc <4 x i8> @anyext_v4i8(<4 x i8> *%dest, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> zeroinitializer)
+  %l = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %dest, i32 1, <4 x i1> %c, <4 x i8> zeroinitializer)
   ret <4 x i8> %l
 }
 
-define arm_aapcs_vfpcc <8 x i8> @anyext_v8i8(<8 x i8> *%dest, <8 x i16> %a) {
+define arm_aapcs_vfpcc <8 x i8> @anyext_v8i8(ptr %dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: anyext_v8i8:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s16 gt, q0, zr
@@ -2063,11 +2043,11 @@ define arm_aapcs_vfpcc <8 x i8> @anyext_v8i8(<8 x i8> *%dest, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> zeroinitializer)
+  %l = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %dest, i32 1, <8 x i1> %c, <8 x i8> zeroinitializer)
   ret <8 x i8> %l
 }
 
-define arm_aapcs_vfpcc <4 x i32> @multi_user_zext(<4 x i16> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @multi_user_zext(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: multi_user_zext:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r7, lr}
@@ -2101,13 +2081,13 @@ define arm_aapcs_vfpcc <4 x i32> @multi_user_zext(<4 x i16> *%dest, <4 x i32> %a
 ; CHECK-BE-NEXT:    pop {r7, pc}
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
+  %l = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
   call void @foo(<4 x i16> %l)
   %ext = zext <4 x i16> %l to <4 x i32>
   ret <4 x i32> %ext
 }
 
-define arm_aapcs_vfpcc <4 x i32> @multi_user_sext(<4 x i16> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @multi_user_sext(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: multi_user_sext:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r7, lr}
@@ -2142,20 +2122,20 @@ define arm_aapcs_vfpcc <4 x i32> @multi_user_sext(<4 x i16> *%dest, <4 x i32> %a
 ; CHECK-BE-NEXT:    pop {r7, pc}
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
+  %l = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
   call void @foo(<4 x i16> %l)
   %ext = sext <4 x i16> %l to <4 x i32>
   ret <4 x i32> %ext
 }
 
-declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32, <4 x i1>, <4 x i16>)
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
-declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
-declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32, <4 x i1>, <4 x i8>)
-declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)
-declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
-declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>)
-declare <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>*, i32, <2 x i1>, <2 x i64>)
-declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32, <4 x i1>, <4 x i16>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32, <4 x i1>, <4 x i32>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32, <8 x i1>, <8 x i16>)
+declare <4 x i8> @llvm.masked.load.v4i8.p0(ptr, i32, <4 x i1>, <4 x i8>)
+declare <8 x i8> @llvm.masked.load.v8i8.p0(ptr, i32, <8 x i1>, <8 x i8>)
+declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32, <16 x i1>, <16 x i8>)
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32, <4 x i1>, <4 x float>)
+declare <8 x half> @llvm.masked.load.v8f16.p0(ptr, i32, <8 x i1>, <8 x half>)
+declare <2 x i64> @llvm.masked.load.v2i64.p0(ptr, i32, <2 x i1>, <2 x i64>)
+declare <2 x double> @llvm.masked.load.v2f64.p0(ptr, i32, <2 x i1>, <2 x double>)
 declare void @foo(<4 x i16>)
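
An illustrative aside, not part of the patch: the mechanical pattern in the load hunks above is that the typed-pointer form needed a bitcast from i8* to the vector pointer type plus an intrinsic name mangled on the pointee type, while the opaque-pointer form passes the ptr operand directly and keeps only the address-space mangling (.p0v4i32 becomes .p0). A minimal before/after sketch of that pattern, with placeholder names %x, %m and %v:

  ; typed pointers (before):
  ;   %p = bitcast i8* %x to <4 x i32>*
  ;   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %p, i32 4, <4 x i1> %m, <4 x i32> undef)
  ; opaque pointers (after):
  %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x, i32 4, <4 x i1> %m, <4 x i32> undef)

Dropping the bitcasts renumbers the remaining unnamed values (%1 becomes %0 and so on), which is the only other textual change in these functions; the generated code is identical, hence the NFC tag.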

diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-store-mmo.ll b/llvm/test/CodeGen/Thumb2/mve-masked-store-mmo.ll
index c0f1cbfb63530..1d00696a37210 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-store-mmo.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-store-mmo.ll
@@ -45,32 +45,30 @@ define i32 @incorrectmmo() {
 ; CHECK-NEXT:    .zero 1
 entry:
   %x = alloca [10 x i8], align 1
-  %0 = getelementptr inbounds [10 x i8], [10 x i8]* %x, i32 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 10, i8* nonnull %0)
-  %1 = bitcast [10 x i8]* %x to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison>, <16 x i8>* %1, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>)
-  %2 = load i8, i8* %0, align 1
-  %conv1 = zext i8 %2 to i32
-  %arrayidx2 = getelementptr inbounds [10 x i8], [10 x i8]* %x, i32 0, i32 1
-  %3 = load i8, i8* %arrayidx2, align 1
-  %conv3 = zext i8 %3 to i32
+  call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %x)
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison, i8 poison>, ptr %x, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>)
+  %0 = load i8, ptr %x, align 1
+  %conv1 = zext i8 %0 to i32
+  %arrayidx2 = getelementptr inbounds [10 x i8], ptr %x, i32 0, i32 1
+  %1 = load i8, ptr %arrayidx2, align 1
+  %conv3 = zext i8 %1 to i32
   %add = add nuw nsw i32 %conv3, %conv1
-  %arrayidx4 = getelementptr inbounds [10 x i8], [10 x i8]* %x, i32 0, i32 2
-  %4 = load i8, i8* %arrayidx4, align 1
-  %conv5 = zext i8 %4 to i32
+  %arrayidx4 = getelementptr inbounds [10 x i8], ptr %x, i32 0, i32 2
+  %2 = load i8, ptr %arrayidx4, align 1
+  %conv5 = zext i8 %2 to i32
   %add6 = add nuw nsw i32 %add, %conv5
-  %arrayidx7 = getelementptr inbounds [10 x i8], [10 x i8]* %x, i32 0, i32 8
-  %5 = load i8, i8* %arrayidx7, align 1
-  %conv8 = zext i8 %5 to i32
+  %arrayidx7 = getelementptr inbounds [10 x i8], ptr %x, i32 0, i32 8
+  %3 = load i8, ptr %arrayidx7, align 1
+  %conv8 = zext i8 %3 to i32
   %add9 = add nuw nsw i32 %add6, %conv8
-  %arrayidx10 = getelementptr inbounds [10 x i8], [10 x i8]* %x, i32 0, i32 9
-  %6 = load i8, i8* %arrayidx10, align 1
-  %conv11 = zext i8 %6 to i32
+  %arrayidx10 = getelementptr inbounds [10 x i8], ptr %x, i32 0, i32 9
+  %4 = load i8, ptr %arrayidx10, align 1
+  %conv11 = zext i8 %4 to i32
   %add12 = add nuw nsw i32 %add9, %conv11
-  call void @llvm.lifetime.end.p0i8(i64 10, i8* nonnull %0)
+  call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %x)
   ret i32 %add12
 }
 
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32, <16 x i1>)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
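
Likewise illustrative, not part of the patch: the lifetime intrinsics lose their pointee mangling (llvm.lifetime.start.p0i8 becomes llvm.lifetime.start.p0) and take the alloca's ptr directly, so the getelementptr that existed only to produce an equivalent i8* is dropped as dead. Sketch of that pattern, with placeholder name %x:

  ; typed pointers (before):
  ;   %p = getelementptr inbounds [10 x i8], [10 x i8]* %x, i32 0, i32 0
  ;   call void @llvm.lifetime.start.p0i8(i64 10, i8* nonnull %p)
  ; opaque pointers (after):
  call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %x)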

diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-store.ll b/llvm/test/CodeGen/Thumb2/mve-masked-store.ll
index c212f7c5fbe37..9012fada2bee2 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-store.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-store.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-LE
 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-BE
 
-define arm_aapcs_vfpcc void @masked_v4i32(<4 x i32> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc void @masked_v4i32(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4i32:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -17,11 +17,11 @@ define arm_aapcs_vfpcc void @masked_v4i32(<4 x i32> *%dest, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a, <4 x i32>* %dest, i32 4, <4 x i1> %c)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %a, ptr %dest, i32 4, <4 x i1> %c)
   ret void
 }
 
-define arm_aapcs_vfpcc void @masked_v4i32_align1(<4 x i32> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc void @masked_v4i32_align1(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4i32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #4
@@ -100,11 +100,11 @@ define arm_aapcs_vfpcc void @masked_v4i32_align1(<4 x i32> *%dest, <4 x i32> %a)
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a, <4 x i32>* %dest, i32 1, <4 x i1> %c)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %a, ptr %dest, i32 1, <4 x i1> %c)
   ret void
 }
 
-define i8* @masked_v4i32_pre(i8* %y, i8* %x, <4 x i32> %a) {
+define ptr @masked_v4i32_pre(ptr %y, ptr %x, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4i32_pre:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldr d1, [sp]
@@ -124,16 +124,14 @@ define i8* @masked_v4i32_pre(i8* %y, i8* %x, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vstrwt.32 q1, [r0, #4]!
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %z to <4 x i32>*
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x i32>, ptr %x, align 4
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @masked_v4i32_post(i8* %y, i8* %x, <4 x i32> %a) {
+define ptr @masked_v4i32_post(ptr %y, ptr %x, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4i32_post:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldr d1, [sp]
@@ -153,17 +151,15 @@ define i8* @masked_v4i32_post(i8* %y, i8* %x, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vstrwt.32 q1, [r0], #4
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i8* %y to <4 x i32>*
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x i32>, ptr %x, align 4
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
 
-define arm_aapcs_vfpcc void @masked_v8i16(<8 x i16> *%dest, <8 x i16> %a) {
+define arm_aapcs_vfpcc void @masked_v8i16(ptr %dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s16 gt, q0, zr
@@ -178,11 +174,11 @@ define arm_aapcs_vfpcc void @masked_v8i16(<8 x i16> *%dest, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %a, <8 x i16>* %dest, i32 2, <8 x i1> %c)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %a, ptr %dest, i32 2, <8 x i1> %c)
   ret void
 }
 
-define arm_aapcs_vfpcc void @masked_v8i16_align1(<8 x i16> *%dest, <8 x i16> %a) {
+define arm_aapcs_vfpcc void @masked_v8i16_align1(ptr %dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #4
@@ -319,11 +315,11 @@ define arm_aapcs_vfpcc void @masked_v8i16_align1(<8 x i16> *%dest, <8 x i16> %a)
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %a, <8 x i16>* %dest, i32 1, <8 x i1> %c)
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %a, ptr %dest, i32 1, <8 x i1> %c)
   ret void
 }
 
-define i8* @masked_v8i16_pre(i8* %y, i8* %x, <8 x i16> %a) {
+define ptr @masked_v8i16_pre(ptr %y, ptr %x, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16_pre:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldr d1, [sp]
@@ -343,16 +339,14 @@ define i8* @masked_v8i16_pre(i8* %y, i8* %x, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    vstrht.16 q1, [r0, #4]!
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 4
-  %2 = bitcast i8* %z to <8 x i16>*
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <8 x i16>, ptr %x, align 4
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @masked_v8i16_post(i8* %y, i8* %x, <8 x i16> %a) {
+define ptr @masked_v8i16_post(ptr %y, ptr %x, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16_post:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldr d1, [sp]
@@ -372,17 +366,15 @@ define i8* @masked_v8i16_post(i8* %y, i8* %x, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    vstrht.16 q1, [r0], #4
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 4
-  %2 = bitcast i8* %y to <8 x i16>*
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <8 x i16>, ptr %x, align 4
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
 
-define arm_aapcs_vfpcc void @masked_v16i8(<16 x i8> *%dest, <16 x i8> %a) {
+define arm_aapcs_vfpcc void @masked_v16i8(ptr %dest, <16 x i8> %a) {
 ; CHECK-LE-LABEL: masked_v16i8:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s8 gt, q0, zr
@@ -397,11 +389,11 @@ define arm_aapcs_vfpcc void @masked_v16i8(<16 x i8> *%dest, <16 x i8> %a) {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp sgt <16 x i8> %a, zeroinitializer
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %a, <16 x i8>* %dest, i32 1, <16 x i1> %c)
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %a, ptr %dest, i32 1, <16 x i1> %c)
   ret void
 }
 
-define i8* @masked_v16i8_pre(i8* %y, i8* %x, <16 x i8> %a) {
+define ptr @masked_v16i8_pre(ptr %y, ptr %x, <16 x i8> %a) {
 ; CHECK-LE-LABEL: masked_v16i8_pre:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldr d1, [sp]
@@ -421,16 +413,14 @@ define i8* @masked_v16i8_pre(i8* %y, i8* %x, <16 x i8> %a) {
 ; CHECK-BE-NEXT:    vstrbt.8 q1, [r0, #4]!
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 4
-  %2 = bitcast i8* %z to <16 x i8>*
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <16 x i8>, ptr %x, align 4
   %c = icmp sgt <16 x i8> %a, zeroinitializer
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %z, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @masked_v16i8_post(i8* %y, i8* %x, <16 x i8> %a) {
+define ptr @masked_v16i8_post(ptr %y, ptr %x, <16 x i8> %a) {
 ; CHECK-LE-LABEL: masked_v16i8_post:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldr d1, [sp]
@@ -450,17 +440,15 @@ define i8* @masked_v16i8_post(i8* %y, i8* %x, <16 x i8> %a) {
 ; CHECK-BE-NEXT:    vstrbt.8 q1, [r0], #4
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 4
-  %2 = bitcast i8* %y to <16 x i8>*
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <16 x i8>, ptr %x, align 4
   %c = icmp sgt <16 x i8> %a, zeroinitializer
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
-  ret i8* %z
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %0, ptr %y, i32 1, <16 x i1> %c)
+  ret ptr %z
 }
 
 
-define arm_aapcs_vfpcc void @masked_v4f32(<4 x float> *%dest, <4 x float> %a, <4 x i32> %b) {
+define arm_aapcs_vfpcc void @masked_v4f32(ptr %dest, <4 x float> %a, <4 x i32> %b) {
 ; CHECK-LE-LABEL: masked_v4f32:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.i32 ne, q1, zr
@@ -476,11 +464,11 @@ define arm_aapcs_vfpcc void @masked_v4f32(<4 x float> *%dest, <4 x float> %a, <4
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp ugt <4 x i32> %b, zeroinitializer
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %a, <4 x float>* %dest, i32 4, <4 x i1> %c)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %a, ptr %dest, i32 4, <4 x i1> %c)
   ret void
 }
 
-define arm_aapcs_vfpcc void @masked_v4f32_align1(<4 x float> *%dest, <4 x float> %a, <4 x i32> %b) {
+define arm_aapcs_vfpcc void @masked_v4f32_align1(ptr %dest, <4 x float> %a, <4 x i32> %b) {
 ; CHECK-LE-LABEL: masked_v4f32_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #4
@@ -560,11 +548,11 @@ define arm_aapcs_vfpcc void @masked_v4f32_align1(<4 x float> *%dest, <4 x float>
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp ugt <4 x i32> %b, zeroinitializer
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %a, <4 x float>* %dest, i32 1, <4 x i1> %c)
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %a, ptr %dest, i32 1, <4 x i1> %c)
   ret void
 }
 
-define i8* @masked_v4f32_pre(i8* %y, i8* %x, <4 x i32> %a) {
+define ptr @masked_v4f32_pre(ptr %y, ptr %x, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4f32_pre:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldr d1, [sp]
@@ -584,16 +572,14 @@ define i8* @masked_v4f32_pre(i8* %y, i8* %x, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vstrwt.32 q1, [r0, #4]!
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %z to <4 x float>*
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x float>, ptr %x, align 4
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %z, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @masked_v4f32_post(i8* %y, i8* %x, <4 x i32> %a) {
+define ptr @masked_v4f32_post(ptr %y, ptr %x, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4f32_post:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldr d1, [sp]
@@ -613,17 +599,15 @@ define i8* @masked_v4f32_post(i8* %y, i8* %x, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vstrwt.32 q1, [r0], #4
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 4
-  %2 = bitcast i8* %y to <4 x float>*
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <4 x float>, ptr %x, align 4
   %c = icmp sgt <4 x i32> %a, zeroinitializer
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
-  ret i8* %z
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %0, ptr %y, i32 4, <4 x i1> %c)
+  ret ptr %z
 }
 
 
-define arm_aapcs_vfpcc void @masked_v8f16(<8 x half> *%dest, <8 x half> %a, <8 x i16> %b) {
+define arm_aapcs_vfpcc void @masked_v8f16(ptr %dest, <8 x half> %a, <8 x i16> %b) {
 ; CHECK-LE-LABEL: masked_v8f16:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.i16 ne, q1, zr
@@ -639,11 +623,11 @@ define arm_aapcs_vfpcc void @masked_v8f16(<8 x half> *%dest, <8 x half> %a, <8 x
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp ugt <8 x i16> %b, zeroinitializer
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %a, <8 x half>* %dest, i32 2, <8 x i1> %c)
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %a, ptr %dest, i32 2, <8 x i1> %c)
   ret void
 }
 
-define arm_aapcs_vfpcc void @masked_v8f16_align1(<8 x half> *%dest, <8 x half> %a, <8 x i16> %b) {
+define arm_aapcs_vfpcc void @masked_v8f16_align1(ptr %dest, <8 x half> %a, <8 x i16> %b) {
 ; CHECK-LE-LABEL: masked_v8f16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #36
@@ -869,11 +853,11 @@ define arm_aapcs_vfpcc void @masked_v8f16_align1(<8 x half> *%dest, <8 x half> %
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp ugt <8 x i16> %b, zeroinitializer
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %a, <8 x half>* %dest, i32 1, <8 x i1> %c)
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %a, ptr %dest, i32 1, <8 x i1> %c)
   ret void
 }
 
-define i8* @masked_v8f16_pre(i8* %y, i8* %x, <8 x i16> %a) {
+define ptr @masked_v8f16_pre(ptr %y, ptr %x, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8f16_pre:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldr d1, [sp]
@@ -893,16 +877,14 @@ define i8* @masked_v8f16_pre(i8* %y, i8* %x, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    vstrht.16 q1, [r0, #4]!
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 4
-  %2 = bitcast i8* %z to <8 x half>*
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <8 x half>, ptr %x, align 4
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %z, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
-define i8* @masked_v8f16_post(i8* %y, i8* %x, <8 x i16> %a) {
+define ptr @masked_v8f16_post(ptr %y, ptr %x, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8f16_post:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldr d1, [sp]
@@ -922,17 +904,15 @@ define i8* @masked_v8f16_post(i8* %y, i8* %x, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    vstrht.16 q1, [r0], #4
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds i8, i8* %y, i32 4
-  %0 = bitcast i8* %x to <8 x half>*
-  %1 = load <8 x half>, <8 x half>* %0, align 4
-  %2 = bitcast i8* %y to <8 x half>*
+  %z = getelementptr inbounds i8, ptr %y, i32 4
+  %0 = load <8 x half>, ptr %x, align 4
   %c = icmp sgt <8 x i16> %a, zeroinitializer
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
-  ret i8* %z
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %y, i32 2, <8 x i1> %c)
+  ret ptr %z
 }
 
 
-define arm_aapcs_vfpcc void @masked_v2i64(<2 x i64> *%dest, <2 x i64> %a) {
+define arm_aapcs_vfpcc void @masked_v2i64(ptr %dest, <2 x i64> %a) {
 ; CHECK-LE-LABEL: masked_v2i64:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r7, lr}
@@ -987,11 +967,11 @@ define arm_aapcs_vfpcc void @masked_v2i64(<2 x i64> *%dest, <2 x i64> %a) {
 ; CHECK-BE-NEXT:    pop {r7, pc}
 entry:
   %c = icmp sgt <2 x i64> %a, zeroinitializer
-  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %a, <2 x i64>* %dest, i32 8, <2 x i1> %c)
+  call void @llvm.masked.store.v2i64.p0(<2 x i64> %a, ptr %dest, i32 8, <2 x i1> %c)
   ret void
 }
 
-define arm_aapcs_vfpcc void @masked_v2f64(<2 x double> *%dest, <2 x double> %a, <2 x i64> %b) {
+define arm_aapcs_vfpcc void @masked_v2f64(ptr %dest, <2 x double> %a, <2 x i64> %b) {
 ; CHECK-LE-LABEL: masked_v2f64:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .save {r7, lr}
@@ -1046,11 +1026,11 @@ define arm_aapcs_vfpcc void @masked_v2f64(<2 x double> *%dest, <2 x double> %a,
 ; CHECK-BE-NEXT:    pop {r7, pc}
 entry:
   %c = icmp sgt <2 x i64> %b, zeroinitializer
-  call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %a, <2 x double>* %dest, i32 8, <2 x i1> %c)
+  call void @llvm.masked.store.v2f64.p0(<2 x double> %a, ptr %dest, i32 8, <2 x i1> %c)
   ret void
 }
 
-define arm_aapcs_vfpcc void @masked_v4i16(<4 x i16> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc void @masked_v4i16(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4i16:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -1066,11 +1046,11 @@ define arm_aapcs_vfpcc void @masked_v4i16(<4 x i16> *%dest, <4 x i32> %a) {
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
   %trunc = trunc <4 x i32> %a to <4 x i16>
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %trunc, <4 x i16>* %dest, i32 2, <4 x i1> %c)
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %trunc, ptr %dest, i32 2, <4 x i1> %c)
   ret void
 }
 
-define arm_aapcs_vfpcc void @masked_v4i8(<4 x i8> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc void @masked_v4i8(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4i8:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s32 gt, q0, zr
@@ -1086,11 +1066,11 @@ define arm_aapcs_vfpcc void @masked_v4i8(<4 x i8> *%dest, <4 x i32> %a) {
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
   %trunc = trunc <4 x i32> %a to <4 x i8>
-  call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> %trunc, <4 x i8>* %dest, i32 1, <4 x i1> %c)
+  call void @llvm.masked.store.v4i8.p0(<4 x i8> %trunc, ptr %dest, i32 1, <4 x i1> %c)
   ret void
 }
 
-define arm_aapcs_vfpcc void @masked_v8i8(<8 x i8> *%dest, <8 x i16> %a) {
+define arm_aapcs_vfpcc void @masked_v8i8(ptr %dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i8:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vpt.s16 gt, q0, zr
@@ -1106,11 +1086,11 @@ define arm_aapcs_vfpcc void @masked_v8i8(<8 x i8> *%dest, <8 x i16> %a) {
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
   %trunc = trunc <8 x i16> %a to <8 x i8>
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %trunc, <8 x i8>* %dest, i32 1, <8 x i1> %c)
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %trunc, ptr %dest, i32 1, <8 x i1> %c)
   ret void
 }
 
-define arm_aapcs_vfpcc void @masked_v4i16_align1(<4 x i16> *%dest, <4 x i32> %a) {
+define arm_aapcs_vfpcc void @masked_v4i16_align1(ptr %dest, <4 x i32> %a) {
 ; CHECK-LE-LABEL: masked_v4i16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #4
@@ -1190,11 +1170,11 @@ define arm_aapcs_vfpcc void @masked_v4i16_align1(<4 x i16> *%dest, <4 x i32> %a)
 entry:
   %c = icmp sgt <4 x i32> %a, zeroinitializer
   %trunc = trunc <4 x i32> %a to <4 x i16>
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %trunc, <4 x i16>* %dest, i32 1, <4 x i1> %c)
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %trunc, ptr %dest, i32 1, <4 x i1> %c)
   ret void
 }
 
-define arm_aapcs_vfpcc void @masked_v4f16_align4(<4 x half> *%dest, <4 x float> %a) {
+define arm_aapcs_vfpcc void @masked_v4f16_align4(ptr %dest, <4 x float> %a) {
 ; CHECK-LE-LABEL: masked_v4f16_align4:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #4
@@ -1314,11 +1294,11 @@ define arm_aapcs_vfpcc void @masked_v4f16_align4(<4 x half> *%dest, <4 x float>
 entry:
   %c = fcmp ogt <4 x float> %a, zeroinitializer
   %trunc = fptrunc <4 x float> %a to <4 x half>
-  call void @llvm.masked.store.v4f16.p0v4f16(<4 x half> %trunc, <4 x half>* %dest, i32 4, <4 x i1> %c)
+  call void @llvm.masked.store.v4f16.p0(<4 x half> %trunc, ptr %dest, i32 4, <4 x i1> %c)
   ret void
 }
 
-define arm_aapcs_vfpcc void @masked_v4f16_align2(<4 x half> *%dest, <4 x float> %a) {
+define arm_aapcs_vfpcc void @masked_v4f16_align2(ptr %dest, <4 x float> %a) {
 ; CHECK-LE-LABEL: masked_v4f16_align2:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #4
@@ -1438,11 +1418,11 @@ define arm_aapcs_vfpcc void @masked_v4f16_align2(<4 x half> *%dest, <4 x float>
 entry:
   %c = fcmp ogt <4 x float> %a, zeroinitializer
   %trunc = fptrunc <4 x float> %a to <4 x half>
-  call void @llvm.masked.store.v4f16.p0v4f16(<4 x half> %trunc, <4 x half>* %dest, i32 2, <4 x i1> %c)
+  call void @llvm.masked.store.v4f16.p0(<4 x half> %trunc, ptr %dest, i32 2, <4 x i1> %c)
   ret void
 }
 
-define arm_aapcs_vfpcc void @masked_v4f16_align1(<4 x half> *%dest, <4 x float> %a) {
+define arm_aapcs_vfpcc void @masked_v4f16_align1(ptr %dest, <4 x float> %a) {
 ; CHECK-LE-LABEL: masked_v4f16_align1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #20
@@ -1578,18 +1558,18 @@ define arm_aapcs_vfpcc void @masked_v4f16_align1(<4 x half> *%dest, <4 x float>
 entry:
   %c = fcmp ogt <4 x float> %a, zeroinitializer
   %trunc = fptrunc <4 x float> %a to <4 x half>
-  call void @llvm.masked.store.v4f16.p0v4f16(<4 x half> %trunc, <4 x half>* %dest, i32 1, <4 x i1> %c)
+  call void @llvm.masked.store.v4f16.p0(<4 x half> %trunc, ptr %dest, i32 1, <4 x i1> %c)
   ret void
 }
 
-declare void @llvm.masked.store.v4i8.p0v4i8(<4 x i8>, <4 x i8>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8i8.p0v8i8(<8 x i8>, <8 x i8>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v4i16.p0v4i16(<4 x i16>, <4 x i16>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v4f16.p0v4f16(<4 x half>, <4 x half>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v2i64.p0v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>)
-declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>)
+declare void @llvm.masked.store.v4i8.p0(<4 x i8>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8i8.p0(<8 x i8>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i16.p0(<4 x i16>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32, <16 x i1>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v4f16.p0(<4 x half>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8f16.p0(<8 x half>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v2i64.p0(<2 x i64>, ptr, i32, <2 x i1>)
+declare void @llvm.masked.store.v2f64.p0(<2 x double>, ptr, i32, <2 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-memtp-branch.ll b/llvm/test/CodeGen/Thumb2/mve-memtp-branch.ll
index 7bd79a23c2595..405c8f8ca9af0 100644
--- a/llvm/test/CodeGen/Thumb2/mve-memtp-branch.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-memtp-branch.ll
@@ -6,7 +6,7 @@
 ; WhileLoopStart
 
 @arr_183 = external dso_local local_unnamed_addr global [20 x [23 x [19 x i8]]], align 1
-define i32 @a(i8 zeroext %b, [3 x i8]* nocapture readonly %c, [3 x i32]* nocapture readonly %d) {
+define i32 @a(i8 zeroext %b, ptr nocapture readonly %c, ptr nocapture readonly %d) {
 ; CHECK-LABEL: a:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -194,172 +194,172 @@ entry:
 
 for.cond.preheader:                               ; preds = %entry
   %cmp43 = icmp ugt i8 %b, 1
-  %arrayidx6 = getelementptr inbounds [3 x i32], [3 x i32]* %d, i32 0, i32 1
-  %arrayidx12 = getelementptr inbounds [3 x i8], [3 x i8]* %c, i32 0, i32 1
+  %arrayidx6 = getelementptr inbounds [3 x i32], ptr %d, i32 0, i32 1
+  %arrayidx12 = getelementptr inbounds [3 x i8], ptr %c, i32 0, i32 1
   %cmp43.1 = icmp ugt i8 %b, 1
-  %arrayidx6.1 = getelementptr inbounds [3 x i32], [3 x i32]* %d, i32 0, i32 1
-  %arrayidx12.1 = getelementptr inbounds [3 x i8], [3 x i8]* %c, i32 0, i32 1
+  %arrayidx6.1 = getelementptr inbounds [3 x i32], ptr %d, i32 0, i32 1
+  %arrayidx12.1 = getelementptr inbounds [3 x i8], ptr %c, i32 0, i32 1
   %cmp43.2 = icmp ugt i8 %b, 1
-  %arrayidx6.2 = getelementptr inbounds [3 x i32], [3 x i32]* %d, i32 0, i32 1
-  %arrayidx12.2 = getelementptr inbounds [3 x i8], [3 x i8]* %c, i32 0, i32 1
+  %arrayidx6.2 = getelementptr inbounds [3 x i32], ptr %d, i32 0, i32 1
+  %arrayidx12.2 = getelementptr inbounds [3 x i8], ptr %c, i32 0, i32 1
   %cmp43.3 = icmp ugt i8 %b, 1
-  %arrayidx6.3 = getelementptr inbounds [3 x i32], [3 x i32]* %d, i32 0, i32 1
-  %arrayidx12.3 = getelementptr inbounds [3 x i8], [3 x i8]* %c, i32 0, i32 1
+  %arrayidx6.3 = getelementptr inbounds [3 x i32], ptr %d, i32 0, i32 1
+  %arrayidx12.3 = getelementptr inbounds [3 x i8], ptr %c, i32 0, i32 1
   br label %for.cond
 
 for.body.us.preheader:                            ; preds = %entry
-  %arrayidx6.us.1 = getelementptr inbounds [3 x i32], [3 x i32]* %d, i32 0, i32 1
-  %arrayidx12.us.1 = getelementptr inbounds [3 x i8], [3 x i8]* %c, i32 0, i32 1
-  %arrayidx6.us.2 = getelementptr inbounds [3 x i32], [3 x i32]* %d, i32 0, i32 1
-  %arrayidx12.us.2 = getelementptr inbounds [3 x i8], [3 x i8]* %c, i32 0, i32 1
-  %arrayidx6.us.3 = getelementptr inbounds [3 x i32], [3 x i32]* %d, i32 0, i32 1
-  %arrayidx12.us.3 = getelementptr inbounds [3 x i8], [3 x i8]* %c, i32 0, i32 1
+  %arrayidx6.us.1 = getelementptr inbounds [3 x i32], ptr %d, i32 0, i32 1
+  %arrayidx12.us.1 = getelementptr inbounds [3 x i8], ptr %c, i32 0, i32 1
+  %arrayidx6.us.2 = getelementptr inbounds [3 x i32], ptr %d, i32 0, i32 1
+  %arrayidx12.us.2 = getelementptr inbounds [3 x i8], ptr %c, i32 0, i32 1
+  %arrayidx6.us.3 = getelementptr inbounds [3 x i32], ptr %d, i32 0, i32 1
+  %arrayidx12.us.3 = getelementptr inbounds [3 x i8], ptr %c, i32 0, i32 1
   br label %for.body.us
 
 for.cond:                                         ; preds = %for.cond.backedge.3, %for.cond.preheader
   br i1 %cmp43, label %for.body.lr.ph, label %for.cond.backedge
 
 for.body.lr.ph:                                   ; preds = %for.cond
-  %0 = load i32, i32* %arrayidx6, align 4
+  %0 = load i32, ptr %arrayidx6, align 4
   %tobool7.not = icmp eq i32 %0, 0
   br i1 %tobool7.not, label %land.end, label %land.rhs
 
 for.body.us:                                      ; preds = %land.end.us.3, %for.body.us.preheader
   %conv44.us = phi i32 [ 0, %for.body.us.preheader ], [ 1, %land.end.us.3 ]
-  %arrayidx6.us = getelementptr inbounds [3 x i32], [3 x i32]* %d, i32 0, i32 %conv44.us
-  %1 = load i32, i32* %arrayidx6.us, align 4
+  %arrayidx6.us = getelementptr inbounds [3 x i32], ptr %d, i32 0, i32 %conv44.us
+  %1 = load i32, ptr %arrayidx6.us, align 4
   %tobool7.not.us = icmp eq i32 %1, 0
   br i1 %tobool7.not.us, label %land.end.us, label %land.rhs.us
 
 land.rhs.us:                                      ; preds = %for.body.us
-  %arrayidx12.us = getelementptr inbounds [3 x i8], [3 x i8]* %c, i32 0, i32 %conv44.us
-  %2 = load i8, i8* %arrayidx12.us, align 1
+  %arrayidx12.us = getelementptr inbounds [3 x i8], ptr %c, i32 0, i32 %conv44.us
+  %2 = load i8, ptr %arrayidx12.us, align 1
   %tobool13.us = zext i8 %2 to i32
   br label %land.end.us
 
 land.end.us:                                      ; preds = %land.rhs.us, %for.body.us
   %3 = phi i32 [ 0, %for.body.us ], [ %tobool13.us, %land.rhs.us ]
-  %scevgep45 = getelementptr [20 x [23 x [19 x i8]]], [20 x [23 x [19 x i8]]]* @arr_183, i32 0, i32 0, i32 %conv44.us, i32 %3
+  %scevgep45 = getelementptr [20 x [23 x [19 x i8]]], ptr @arr_183, i32 0, i32 0, i32 %conv44.us, i32 %3
   %4 = sub nuw nsw i32 108, %3
-  call void @llvm.memset.p0i8.i32(i8* align 1 %scevgep45, i8 0, i32 %4, i1 false)
-  %5 = load i32, i32* %arrayidx6.us.1, align 4
+  call void @llvm.memset.p0.i32(ptr align 1 %scevgep45, i8 0, i32 %4, i1 false)
+  %5 = load i32, ptr %arrayidx6.us.1, align 4
   %tobool7.not.us.1 = icmp eq i32 %5, 0
   br i1 %tobool7.not.us.1, label %land.end.us.1, label %land.rhs.us.1
 
 land.rhs:                                         ; preds = %for.body.lr.ph
-  %6 = load i8, i8* %arrayidx12, align 1
+  %6 = load i8, ptr %arrayidx12, align 1
   %tobool13 = zext i8 %6 to i32
   br label %land.end
 
 land.end:                                         ; preds = %land.rhs, %for.body.lr.ph
   %7 = phi i32 [ 0, %for.body.lr.ph ], [ %tobool13, %land.rhs ]
-  %scevgep = getelementptr [20 x [23 x [19 x i8]]], [20 x [23 x [19 x i8]]]* @arr_183, i32 0, i32 0, i32 1, i32 %7
+  %scevgep = getelementptr [20 x [23 x [19 x i8]]], ptr @arr_183, i32 0, i32 0, i32 1, i32 %7
   %8 = sub nuw nsw i32 108, %7
-  call void @llvm.memset.p0i8.i32(i8* align 1 %scevgep, i8 0, i32 %8, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 1 %scevgep, i8 0, i32 %8, i1 false)
   br label %for.cond.backedge
 
 for.cond.backedge:                                ; preds = %land.end, %for.cond
   br i1 %cmp43.1, label %for.body.lr.ph.1, label %for.cond.backedge.1
 
 for.body.lr.ph.1:                                 ; preds = %for.cond.backedge
-  %9 = load i32, i32* %arrayidx6.1, align 4
+  %9 = load i32, ptr %arrayidx6.1, align 4
   %tobool7.not.1 = icmp eq i32 %9, 0
   br i1 %tobool7.not.1, label %land.end.1, label %land.rhs.1
 
 land.rhs.1:                                       ; preds = %for.body.lr.ph.1
-  %10 = load i8, i8* %arrayidx12.1, align 1
+  %10 = load i8, ptr %arrayidx12.1, align 1
   %tobool13.1 = zext i8 %10 to i32
   br label %land.end.1
 
 land.end.1:                                       ; preds = %land.rhs.1, %for.body.lr.ph.1
   %11 = phi i32 [ 0, %for.body.lr.ph.1 ], [ %tobool13.1, %land.rhs.1 ]
-  %scevgep.1 = getelementptr [20 x [23 x [19 x i8]]], [20 x [23 x [19 x i8]]]* @arr_183, i32 0, i32 0, i32 1, i32 %11
+  %scevgep.1 = getelementptr [20 x [23 x [19 x i8]]], ptr @arr_183, i32 0, i32 0, i32 1, i32 %11
   %12 = sub nuw nsw i32 108, %11
-  call void @llvm.memset.p0i8.i32(i8* align 1 %scevgep.1, i8 0, i32 %12, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 1 %scevgep.1, i8 0, i32 %12, i1 false)
   br label %for.cond.backedge.1
 
 for.cond.backedge.1:                              ; preds = %land.end.1, %for.cond.backedge
   br i1 %cmp43.2, label %for.body.lr.ph.2, label %for.cond.backedge.2
 
 for.body.lr.ph.2:                                 ; preds = %for.cond.backedge.1
-  %13 = load i32, i32* %arrayidx6.2, align 4
+  %13 = load i32, ptr %arrayidx6.2, align 4
   %tobool7.not.2 = icmp eq i32 %13, 0
   br i1 %tobool7.not.2, label %land.end.2, label %land.rhs.2
 
 land.rhs.2:                                       ; preds = %for.body.lr.ph.2
-  %14 = load i8, i8* %arrayidx12.2, align 1
+  %14 = load i8, ptr %arrayidx12.2, align 1
   %tobool13.2 = zext i8 %14 to i32
   br label %land.end.2
 
 land.end.2:                                       ; preds = %land.rhs.2, %for.body.lr.ph.2
   %15 = phi i32 [ 0, %for.body.lr.ph.2 ], [ %tobool13.2, %land.rhs.2 ]
-  %scevgep.2 = getelementptr [20 x [23 x [19 x i8]]], [20 x [23 x [19 x i8]]]* @arr_183, i32 0, i32 0, i32 1, i32 %15
+  %scevgep.2 = getelementptr [20 x [23 x [19 x i8]]], ptr @arr_183, i32 0, i32 0, i32 1, i32 %15
   %16 = sub nuw nsw i32 108, %15
-  call void @llvm.memset.p0i8.i32(i8* align 1 %scevgep.2, i8 0, i32 %16, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 1 %scevgep.2, i8 0, i32 %16, i1 false)
   br label %for.cond.backedge.2
 
 for.cond.backedge.2:                              ; preds = %land.end.2, %for.cond.backedge.1
   br i1 %cmp43.3, label %for.body.lr.ph.3, label %for.cond.backedge.3
 
 for.body.lr.ph.3:                                 ; preds = %for.cond.backedge.2
-  %17 = load i32, i32* %arrayidx6.3, align 4
+  %17 = load i32, ptr %arrayidx6.3, align 4
   %tobool7.not.3 = icmp eq i32 %17, 0
   br i1 %tobool7.not.3, label %land.end.3, label %land.rhs.3
 
 land.rhs.3:                                       ; preds = %for.body.lr.ph.3
-  %18 = load i8, i8* %arrayidx12.3, align 1
+  %18 = load i8, ptr %arrayidx12.3, align 1
   %tobool13.3 = zext i8 %18 to i32
   br label %land.end.3
 
 land.end.3:                                       ; preds = %land.rhs.3, %for.body.lr.ph.3
   %19 = phi i32 [ 0, %for.body.lr.ph.3 ], [ %tobool13.3, %land.rhs.3 ]
-  %scevgep.3 = getelementptr [20 x [23 x [19 x i8]]], [20 x [23 x [19 x i8]]]* @arr_183, i32 0, i32 0, i32 1, i32 %19
+  %scevgep.3 = getelementptr [20 x [23 x [19 x i8]]], ptr @arr_183, i32 0, i32 0, i32 1, i32 %19
   %20 = sub nuw nsw i32 108, %19
-  call void @llvm.memset.p0i8.i32(i8* align 1 %scevgep.3, i8 0, i32 %20, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 1 %scevgep.3, i8 0, i32 %20, i1 false)
   br label %for.cond.backedge.3
 
 for.cond.backedge.3:                              ; preds = %land.end.3, %for.cond.backedge.2
   br label %for.cond
 
 land.rhs.us.1:                                    ; preds = %land.end.us
-  %21 = load i8, i8* %arrayidx12.us.1, align 1
+  %21 = load i8, ptr %arrayidx12.us.1, align 1
   %tobool13.us.1 = zext i8 %21 to i32
   br label %land.end.us.1
 
 land.end.us.1:                                    ; preds = %land.rhs.us.1, %land.end.us
   %22 = phi i32 [ 0, %land.end.us ], [ %tobool13.us.1, %land.rhs.us.1 ]
-  %scevgep45.1 = getelementptr [20 x [23 x [19 x i8]]], [20 x [23 x [19 x i8]]]* @arr_183, i32 0, i32 0, i32 1, i32 %22
+  %scevgep45.1 = getelementptr [20 x [23 x [19 x i8]]], ptr @arr_183, i32 0, i32 0, i32 1, i32 %22
   %23 = sub nuw nsw i32 108, %22
-  call void @llvm.memset.p0i8.i32(i8* align 1 %scevgep45.1, i8 0, i32 %23, i1 false)
-  %24 = load i32, i32* %arrayidx6.us.2, align 4
+  call void @llvm.memset.p0.i32(ptr align 1 %scevgep45.1, i8 0, i32 %23, i1 false)
+  %24 = load i32, ptr %arrayidx6.us.2, align 4
   %tobool7.not.us.2 = icmp eq i32 %24, 0
   br i1 %tobool7.not.us.2, label %land.end.us.2, label %land.rhs.us.2
 
 land.rhs.us.2:                                    ; preds = %land.end.us.1
-  %25 = load i8, i8* %arrayidx12.us.2, align 1
+  %25 = load i8, ptr %arrayidx12.us.2, align 1
   %tobool13.us.2 = zext i8 %25 to i32
   br label %land.end.us.2
 
 land.end.us.2:                                    ; preds = %land.rhs.us.2, %land.end.us.1
   %26 = phi i32 [ 0, %land.end.us.1 ], [ %tobool13.us.2, %land.rhs.us.2 ]
-  %scevgep45.2 = getelementptr [20 x [23 x [19 x i8]]], [20 x [23 x [19 x i8]]]* @arr_183, i32 0, i32 0, i32 1, i32 %26
+  %scevgep45.2 = getelementptr [20 x [23 x [19 x i8]]], ptr @arr_183, i32 0, i32 0, i32 1, i32 %26
   %27 = sub nuw nsw i32 108, %26
-  call void @llvm.memset.p0i8.i32(i8* align 1 %scevgep45.2, i8 0, i32 %27, i1 false)
-  %28 = load i32, i32* %arrayidx6.us.3, align 4
+  call void @llvm.memset.p0.i32(ptr align 1 %scevgep45.2, i8 0, i32 %27, i1 false)
+  %28 = load i32, ptr %arrayidx6.us.3, align 4
   %tobool7.not.us.3 = icmp eq i32 %28, 0
   br i1 %tobool7.not.us.3, label %land.end.us.3, label %land.rhs.us.3
 
 land.rhs.us.3:                                    ; preds = %land.end.us.2
-  %29 = load i8, i8* %arrayidx12.us.3, align 1
+  %29 = load i8, ptr %arrayidx12.us.3, align 1
   %tobool13.us.3 = zext i8 %29 to i32
   br label %land.end.us.3
 
 land.end.us.3:                                    ; preds = %land.rhs.us.3, %land.end.us.2
   %30 = phi i32 [ 0, %land.end.us.2 ], [ %tobool13.us.3, %land.rhs.us.3 ]
-  %scevgep45.3 = getelementptr [20 x [23 x [19 x i8]]], [20 x [23 x [19 x i8]]]* @arr_183, i32 0, i32 0, i32 1, i32 %30
+  %scevgep45.3 = getelementptr [20 x [23 x [19 x i8]]], ptr @arr_183, i32 0, i32 0, i32 1, i32 %30
   %31 = sub nuw nsw i32 108, %30
-  call void @llvm.memset.p0i8.i32(i8* align 1 %scevgep45.3, i8 0, i32 %31, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 1 %scevgep45.3, i8 0, i32 %31, i1 false)
   br label %for.body.us
 }
 
-declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i1)
+declare void @llvm.memset.p0.i32(ptr, i8, i32, i1)

diff --git a/llvm/test/CodeGen/Thumb2/mve-memtp-loop.ll b/llvm/test/CodeGen/Thumb2/mve-memtp-loop.ll
index 4028585c03c9d..5f3a12711dc0f 100644
--- a/llvm/test/CodeGen/Thumb2/mve-memtp-loop.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-memtp-loop.ll
@@ -6,11 +6,11 @@
 ;    memcpy(dest, src, n);
 ; }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i32, i1 immarg)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg)
 
-define void @test1(i8* noalias nocapture %X, i8* noalias nocapture readonly %Y, i32 %n){
+define void @test1(ptr noalias nocapture %X, ptr noalias nocapture readonly %Y, i32 %n){
 ; CHECK-LABEL: test1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -18,7 +18,7 @@ define void @test1(i8* noalias nocapture %X, i8* noalias nocapture readonly %Y,
 ; CHECK-NEXT:    bl __aeabi_memcpy
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %X, i8* align 1 %Y, i32 %n, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 1 %X, ptr align 1 %Y, i32 %n, i1 false)
   ret void
 }
 
@@ -28,7 +28,7 @@ entry:
 ;     memcpy(X, Y, n);
 ; }
 
-define void @test2(i32* noalias %X, i32* noalias readonly %Y, i32 %n){
+define void @test2(ptr noalias %X, ptr noalias readonly %Y, i32 %n){
 ; CHECK-LABEL: test2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -41,9 +41,7 @@ define void @test2(i32* noalias %X, i32* noalias readonly %Y, i32 %n){
 ; CHECK-NEXT:  .LBB1_2: @ %entry
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %0 = bitcast i32* %X to i8*
-  %1 = bitcast i32* %Y to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 %n, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %X, ptr align 4 %Y, i32 %n, i1 false)
   ret void
 }
 
@@ -54,7 +52,7 @@ entry:
 ;     memcpy(X+2, Y+3, (n*2)+10);
 ; }
 
-define void @test3(i32* noalias nocapture %X, i32* noalias nocapture readonly %Y, i32 %n) {
+define void @test3(ptr noalias nocapture %X, ptr noalias nocapture readonly %Y, i32 %n) {
 ; CHECK-LABEL: test3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -71,13 +69,11 @@ define void @test3(i32* noalias nocapture %X, i32* noalias nocapture readonly %Y
 ; CHECK-NEXT:  .LBB2_2: @ %entry
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %add.ptr = getelementptr inbounds i32, i32* %X, i32 2
-  %0 = bitcast i32* %add.ptr to i8*
-  %add.ptr1 = getelementptr inbounds i32, i32* %Y, i32 3
-  %1 = bitcast i32* %add.ptr1 to i8*
+  %add.ptr = getelementptr inbounds i32, ptr %X, i32 2
+  %add.ptr1 = getelementptr inbounds i32, ptr %Y, i32 3
   %mul = shl nsw i32 %n, 1
   %add = add nsw i32 %mul, 10
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* nonnull align 4 %0, i8* nonnull align 4 %1, i32 %add, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr nonnull align 4 %add.ptr, ptr nonnull align 4 %add.ptr1, i32 %add, i1 false)
   ret void
 }
 
@@ -89,7 +85,7 @@ entry:
 ;     }
 ; }
 
-define void @test4(i32* noalias %X, i32* noalias readonly %Y, i32 %n) {
+define void @test4(ptr noalias %X, ptr noalias readonly %Y, i32 %n) {
 ; CHECK-LABEL: test4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -111,9 +107,7 @@ entry:
   br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
 
 for.body.preheader:                               ; preds = %entry
-  %X.bits = bitcast i32* %X to i8*
-  %Y.bits = bitcast i32* %Y to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %X.bits, i8* align 4 %Y.bits, i32 %n, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %X, ptr align 4 %Y, i32 %n, i1 false)
   br label %for.cond.cleanup
 
 for.cond.cleanup:                                 ; preds = %for.body.preheader, %entry
@@ -121,7 +115,7 @@ for.cond.cleanup:                                 ; preds = %for.body.preheader,
 }
 
 ; Checks that transform can handle > i32 size inputs
-define void @test5(i8* noalias %X, i8* noalias %Y, i64 %n){
+define void @test5(ptr noalias %X, ptr noalias %Y, i64 %n){
 ; CHECK-LABEL: test5:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    .save {r7, lr}
@@ -133,12 +127,12 @@ define void @test5(i8* noalias %X, i8* noalias %Y, i64 %n){
 ; CHECK-NEXT:    letp lr, .LBB4_1
 ; CHECK-NEXT:  .LBB4_2:
 ; CHECK-NEXT:    pop {r7, pc}
-    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %X, i8* align 4 %Y, i64 %n, i1 false)
+    call void @llvm.memcpy.p0.p0.i64(ptr align 4 %X, ptr align 4 %Y, i64 %n, i1 false)
     ret void
 }
 
 ; Checks the transform is applied for constant size inputs below a certain threshold (128 in this case)
-define void @test6(i32* noalias nocapture %X, i32* noalias nocapture readonly %Y, i32 %n) {
+define void @test6(ptr noalias nocapture %X, ptr noalias nocapture readonly %Y, i32 %n) {
 ; CHECK-LABEL: test6:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -152,14 +146,12 @@ define void @test6(i32* noalias nocapture %X, i32* noalias nocapture readonly %Y
 ; CHECK-NEXT:  .LBB5_2: @ %entry
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %0 = bitcast i32* %X to i8*
-  %1 = bitcast i32* %Y to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* noundef nonnull align 4 dereferenceable(127) %0, i8* noundef nonnull align 4 dereferenceable(127) %1, i32 127, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 4 dereferenceable(127) %X, ptr noundef nonnull align 4 dereferenceable(127) %Y, i32 127, i1 false)
   ret void
 }
 
 ; Checks the transform is NOT applied for constant size inputs above a certain threshold (128 in this case)
-define void @test7(i32* noalias nocapture %X, i32* noalias nocapture readonly %Y, i32 %n) {
+define void @test7(ptr noalias nocapture %X, ptr noalias nocapture readonly %Y, i32 %n) {
 ; CHECK-LABEL: test7:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -168,14 +160,12 @@ define void @test7(i32* noalias nocapture %X, i32* noalias nocapture readonly %Y
 ; CHECK-NEXT:    bl __aeabi_memcpy4
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %0 = bitcast i32* %X to i8*
-  %1 = bitcast i32* %Y to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 128, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %X, ptr align 4 %Y, i32 128, i1 false)
   ret void
 }
 
 ; Checks the transform is NOT applied for constant size inputs below a certain threshold (64 in this case)
-define void @test8(i32* noalias nocapture %X, i32* noalias nocapture readonly %Y, i32 %n) {
+define void @test8(ptr noalias nocapture %X, ptr noalias nocapture readonly %Y, i32 %n) {
 ; CHECK-LABEL: test8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -188,14 +178,12 @@ define void @test8(i32* noalias nocapture %X, i32* noalias nocapture readonly %Y
 ; CHECK-NEXT:    stm.w r0, {r2, r3, r4, r12, lr}
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
-  %0 = bitcast i32* %X to i8*
-  %1 = bitcast i32* %Y to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 60, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %X, ptr align 4 %Y, i32 60, i1 false)
   ret void
 }
 
 ; Checks the transform is NOT applied (regardless of alignment) when optimizations are disabled
-define void @test9(i32* noalias nocapture %X, i32* noalias nocapture readonly %Y, i32 %n) #0 {
+define void @test9(ptr noalias nocapture %X, ptr noalias nocapture readonly %Y, i32 %n) #0 {
 ; CHECK-LABEL: test9:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -203,14 +191,12 @@ define void @test9(i32* noalias nocapture %X, i32* noalias nocapture readonly %Y
 ; CHECK-NEXT:    bl __aeabi_memcpy4
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %0 = bitcast i32* %X to i8*
-  %1 = bitcast i32* %Y to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 %n, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %X, ptr align 4 %Y, i32 %n, i1 false)
   ret void
 }
 
 ; Checks the transform is NOT applied (regardless of alignment) when optimization for size is on (-Os or -Oz)
-define void @test10(i32* noalias nocapture %X, i32* noalias nocapture readonly %Y, i32 %n) #1 {
+define void @test10(ptr noalias nocapture %X, ptr noalias nocapture readonly %Y, i32 %n) #1 {
 ; CHECK-LABEL: test10:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -218,13 +204,11 @@ define void @test10(i32* noalias nocapture %X, i32* noalias nocapture readonly %
 ; CHECK-NEXT:    bl __aeabi_memcpy4
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %0 = bitcast i32* %X to i8*
-  %1 = bitcast i32* %Y to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 %n, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %X, ptr align 4 %Y, i32 %n, i1 false)
   ret void
 }
 
-define void @test11(i8* nocapture %x, i8* nocapture %y, i32 %n) {
+define void @test11(ptr nocapture %x, ptr nocapture %y, i32 %n) {
 ; CHECK-LABEL: test11:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -253,17 +237,17 @@ entry:
   br i1 %cmp6, label %prehead, label %for.cond.cleanup
 
 prehead:                                          ; preds = %entry
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %x, i8* align 4 %y, i32 %n, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %x, ptr align 4 %y, i32 %n, i1 false)
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %prehead
   %i.09 = phi i32 [ %inc, %for.body ], [ 0, %prehead ]
-  %x.addr.08 = phi i8* [ %add.ptr, %for.body ], [ %x, %prehead ]
-  %y.addr.07 = phi i8* [ %add.ptr1, %for.body ], [ %y, %prehead ]
-  %add.ptr = getelementptr inbounds i8, i8* %x.addr.08, i32 1
-  %add.ptr1 = getelementptr inbounds i8, i8* %y.addr.07, i32 1
-  %l = load i8, i8* %x.addr.08, align 1
-  store i8 %l, i8* %y.addr.07, align 1
+  %x.addr.08 = phi ptr [ %add.ptr, %for.body ], [ %x, %prehead ]
+  %y.addr.07 = phi ptr [ %add.ptr1, %for.body ], [ %y, %prehead ]
+  %add.ptr = getelementptr inbounds i8, ptr %x.addr.08, i32 1
+  %add.ptr1 = getelementptr inbounds i8, ptr %y.addr.07, i32 1
+  %l = load i8, ptr %x.addr.08, align 1
+  store i8 %l, ptr %y.addr.07, align 1
   %inc = add nuw nsw i32 %i.09, 2
   %exitcond.not = icmp eq i32 %inc, %n
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
@@ -273,7 +257,7 @@ for.cond.cleanup:                                 ; preds = %entry
 }
 
 ; Check that WLSTP loop is generated for simplest case of align = 1
-define void @test12(i8* %X, i8 zeroext %c, i32 %n) {
+define void @test12(ptr %X, i8 zeroext %c, i32 %n) {
 ; CHECK-LABEL: test12:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -286,13 +270,13 @@ define void @test12(i8* %X, i8 zeroext %c, i32 %n) {
 ; CHECK-NEXT:  .LBB11_2: @ %entry
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  call void @llvm.memset.p0i8.i32(i8* align 1 %X, i8 %c, i32 %n, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 1 %X, i8 %c, i32 %n, i1 false)
   ret void
 }
 
 
 ; Check that WLSTP loop is generated for alignment >= 4
-define void @test13(i32* %X, i8 zeroext %c, i32 %n) {
+define void @test13(ptr %X, i8 zeroext %c, i32 %n) {
 ; CHECK-LABEL: test13:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -305,12 +289,11 @@ define void @test13(i32* %X, i8 zeroext %c, i32 %n) {
 ; CHECK-NEXT:  .LBB12_2: @ %entry
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %0 = bitcast i32* %X to i8*
-  call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 %c, i32 %n, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 4 %X, i8 %c, i32 %n, i1 false)
   ret void
 }
 
-define void @twoloops(i32* %X, i32 %n, i32 %m) {
+define void @twoloops(ptr %X, i32 %n, i32 %m) {
 ; CHECK-LABEL: twoloops:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -329,9 +312,8 @@ define void @twoloops(i32* %X, i32 %n, i32 %m) {
 ; CHECK-NEXT:  .LBB13_4: @ %entry
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %0 = bitcast i32* %X to i8*
-  call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 %m, i1 false)
-  call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 %m, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 4 %X, i8 0, i32 %m, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 4 %X, i8 0, i32 %m, i1 false)
   ret void
 }
 
@@ -342,7 +324,7 @@ entry:
 ;     memset(X+2, c, (n*2)+10);
 ; }
 
-define void @test14(i32* %X, i8 zeroext %c, i32 %n) {
+define void @test14(ptr %X, i8 zeroext %c, i32 %n) {
 ; CHECK-LABEL: test14:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -358,11 +340,10 @@ define void @test14(i32* %X, i8 zeroext %c, i32 %n) {
 ; CHECK-NEXT:  .LBB14_2: @ %entry
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %add.ptr = getelementptr inbounds i32, i32* %X, i32 2
-  %0 = bitcast i32* %add.ptr to i8*
+  %add.ptr = getelementptr inbounds i32, ptr %X, i32 2
   %mul = shl nsw i32 %n, 1
   %add = add nsw i32 %mul, 10
-  call void @llvm.memset.p0i8.i32(i8* nonnull align 4 %0, i8 %c, i32 %add, i1 false)
+  call void @llvm.memset.p0.i32(ptr nonnull align 4 %add.ptr, i8 %c, i32 %add, i1 false)
   ret void
 }
 
@@ -376,7 +357,7 @@ entry:
 ;     }
 ; }
 
-define void @test15(i8* nocapture %X, i8 zeroext %c, i32 %n) {
+define void @test15(ptr nocapture %X, i8 zeroext %c, i32 %n) {
 ; CHECK-LABEL: test15:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r2, #1
@@ -398,7 +379,7 @@ entry:
   br i1 %cmp4, label %for.body.preheader, label %for.cond.cleanup
 
 for.body.preheader:                               ; preds = %entry
-  call void @llvm.memset.p0i8.i32(i8* align 4 %X, i8 %c, i32 %n, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 4 %X, i8 %c, i32 %n, i1 false)
   br label %for.cond.cleanup
 
 for.cond.cleanup:                                 ; preds = %for.body.preheader, %entry
@@ -406,7 +387,7 @@ for.cond.cleanup:                                 ; preds = %for.body.preheader,
 }
 
 ; Checks that transform handles case with 0 as src value. No difference is expected.
-define void @test16(i32* %X, i8 zeroext %c, i32 %n) {
+define void @test16(ptr %X, i8 zeroext %c, i32 %n) {
 ; CHECK-LABEL: test16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -419,12 +400,11 @@ define void @test16(i32* %X, i8 zeroext %c, i32 %n) {
 ; CHECK-NEXT:  .LBB16_2: @ %entry
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %0 = bitcast i32* %X to i8*
-  call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 %n, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 4 %X, i8 0, i32 %n, i1 false)
   ret void
 }
 
-define void @csprlive(i32* noalias %X, i32* noalias readonly %Y, i32 %n) {
+define void @csprlive(ptr noalias %X, ptr noalias readonly %Y, i32 %n) {
 ; CHECK-LABEL: csprlive:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -439,9 +419,7 @@ define void @csprlive(i32* noalias %X, i32* noalias readonly %Y, i32 %n) {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   %cmp6 = icmp sgt i32 %n, 0
-  %X.bits = bitcast i32* %X to i8*
-  %Y.bits = bitcast i32* %Y to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %X.bits, i8* align 4 %Y.bits, i32 %n, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %X, ptr align 4 %Y, i32 %n, i1 false)
   br i1 %cmp6, label %if, label %else
 
 if:
@@ -528,10 +506,10 @@ entry:
 
 loop:
   %p = phi i32 [ 0, %entry ], [ %inc, %loop ]
-  call void @llvm.memset.p0i8.i32(i8* align 1 getelementptr ([21 x [16 x [11 x i8]]], [21 x [16 x [11 x i8]]]* @arr_56, i32 0, i32 0, i32 undef, i32 0), i8 0, i32 %b, i1 false)
-  call void @llvm.memset.p0i8.i32(i8* align 1 getelementptr ([21 x [16 x [11 x i8]]], [21 x [16 x [11 x i8]]]* @arr_56, i32 0, i32 0, i32 undef, i32 0), i8 0, i32 %b, i1 false)
-  call void @llvm.memset.p0i8.i32(i8* align 1 getelementptr ([21 x [16 x [11 x i8]]], [21 x [16 x [11 x i8]]]* @arr_56, i32 0, i32 0, i32 undef, i32 0), i8 0, i32 %b, i1 false)
-  call void @llvm.memset.p0i8.i32(i8* align 1 getelementptr ([21 x [16 x [11 x i8]]], [21 x [16 x [11 x i8]]]* @arr_56, i32 0, i32 0, i32 undef, i32 0), i8 0, i32 %b, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 1 getelementptr ([21 x [16 x [11 x i8]]], ptr @arr_56, i32 0, i32 0, i32 undef, i32 0), i8 0, i32 %b, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 1 getelementptr ([21 x [16 x [11 x i8]]], ptr @arr_56, i32 0, i32 0, i32 undef, i32 0), i8 0, i32 %b, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 1 getelementptr ([21 x [16 x [11 x i8]]], ptr @arr_56, i32 0, i32 0, i32 undef, i32 0), i8 0, i32 %b, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 1 getelementptr ([21 x [16 x [11 x i8]]], ptr @arr_56, i32 0, i32 0, i32 undef, i32 0), i8 0, i32 %b, i1 false)
   %inc = add i32 %p, 1
   %c = icmp slt i32 %p, 1024
   br i1 %c, label %loop, label %exit
@@ -792,205 +770,161 @@ define i32 @reverted(i1 zeroext %b) {
 entry:
   %add = select i1 %b, i32 12, i32 11
   %0 = mul nuw nsw i32 %add, 38
-  call void @llvm.memset.p0i8.i32(i8* noundef nonnull align 2 dereferenceable(1) bitcast ([17 x [12 x [19 x i16]]]* @arr_22 to i8*), i8 0, i32 %0, i1 false)
+  call void @llvm.memset.p0.i32(ptr noundef nonnull align 2 dereferenceable(1) @arr_22, i8 0, i32 %0, i1 false)
   br label %for.cond8.preheader
 
 for.cond8.preheader:                              ; preds = %entry, %for.cond8.preheader
   %d.051 = phi i32 [ 0, %entry ], [ %inc, %for.cond8.preheader ]
-  %arrayidx16 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 0, i32 %d.051, i32 0
-  %arrayidx21 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 0, i32 %d.051, i32 0
-  %1 = bitcast i64* %arrayidx21 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %1, align 8
-  %arrayidx21.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 0, i32 %d.051, i32 2
-  %2 = bitcast i64* %arrayidx21.2 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %2, align 8
-  %arrayidx21.4 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 0, i32 %d.051, i32 4
-  %3 = bitcast i64* %arrayidx21.4 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %3, align 8
-  %arrayidx21.6 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 0, i32 %d.051, i32 6
-  %4 = bitcast i16* %arrayidx16 to <8 x i16>*
-  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, <8 x i16>* %4, align 2
-  %5 = bitcast i64* %arrayidx21.6 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %5, align 8
-  %arrayidx16.8 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 0, i32 %d.051, i32 8
-  %arrayidx21.8 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 0, i32 %d.051, i32 8
-  %6 = bitcast i64* %arrayidx21.8 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %6, align 8
-  %arrayidx21.10 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 0, i32 %d.051, i32 10
-  %7 = bitcast i64* %arrayidx21.10 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %7, align 8
-  %arrayidx21.12 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 0, i32 %d.051, i32 12
-  %8 = bitcast i64* %arrayidx21.12 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %8, align 8
-  %arrayidx21.14 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 0, i32 %d.051, i32 14
-  %9 = bitcast i16* %arrayidx16.8 to <8 x i16>*
-  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, <8 x i16>* %9, align 2
-  %10 = bitcast i64* %arrayidx21.14 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %10, align 8
-  %arrayidx16.16 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 0, i32 %d.051, i32 16
-  store i16 5, i16* %arrayidx16.16, align 2
-  %arrayidx21.16 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 0, i32 %d.051, i32 16
-  %arrayidx16.17 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 0, i32 %d.051, i32 17
-  store i16 5, i16* %arrayidx16.17, align 2
-  %11 = bitcast i64* %arrayidx21.16 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %11, align 8
-  %arrayidx16.18 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 0, i32 %d.051, i32 18
-  store i16 5, i16* %arrayidx16.18, align 2
-  %arrayidx21.18 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 0, i32 %d.051, i32 18
-  store i64 5, i64* %arrayidx21.18, align 8
+  %arrayidx16 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 0, i32 %d.051, i32 0
+  %arrayidx21 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 0, i32 %d.051, i32 0
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21, align 8
+  %arrayidx21.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 0, i32 %d.051, i32 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.2, align 8
+  %arrayidx21.4 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 0, i32 %d.051, i32 4
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.4, align 8
+  %arrayidx21.6 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 0, i32 %d.051, i32 6
+  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, ptr %arrayidx16, align 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.6, align 8
+  %arrayidx16.8 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 0, i32 %d.051, i32 8
+  %arrayidx21.8 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 0, i32 %d.051, i32 8
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.8, align 8
+  %arrayidx21.10 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 0, i32 %d.051, i32 10
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.10, align 8
+  %arrayidx21.12 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 0, i32 %d.051, i32 12
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.12, align 8
+  %arrayidx21.14 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 0, i32 %d.051, i32 14
+  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, ptr %arrayidx16.8, align 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.14, align 8
+  %arrayidx16.16 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 0, i32 %d.051, i32 16
+  store i16 5, ptr %arrayidx16.16, align 2
+  %arrayidx21.16 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 0, i32 %d.051, i32 16
+  %arrayidx16.17 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 0, i32 %d.051, i32 17
+  store i16 5, ptr %arrayidx16.17, align 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.16, align 8
+  %arrayidx16.18 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 0, i32 %d.051, i32 18
+  store i16 5, ptr %arrayidx16.18, align 2
+  %arrayidx21.18 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 0, i32 %d.051, i32 18
+  store i64 5, ptr %arrayidx21.18, align 8
   %inc = add nuw nsw i32 %d.051, 1
   %exitcond.not = icmp eq i32 %inc, %add
   br i1 %exitcond.not, label %for.cond.cleanup6, label %for.cond8.preheader
 
 for.cond.cleanup6:                                ; preds = %for.cond8.preheader
-  call void @llvm.memset.p0i8.i32(i8* noundef nonnull align 2 dereferenceable(1) bitcast (i16* getelementptr inbounds ([17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_22, i32 0, i32 4, i32 0, i32 0) to i8*), i8 0, i32 %0, i1 false)
+  call void @llvm.memset.p0.i32(ptr noundef nonnull align 2 dereferenceable(1) getelementptr inbounds ([17 x [12 x [19 x i16]]], ptr @arr_22, i32 0, i32 4, i32 0, i32 0), i8 0, i32 %0, i1 false)
   br label %for.cond8.preheader.1
 
 for.cond8.preheader.1:                            ; preds = %for.cond8.preheader.1, %for.cond.cleanup6
   %d.051.1 = phi i32 [ 0, %for.cond.cleanup6 ], [ %inc.1, %for.cond8.preheader.1 ]
-  %arrayidx16.1 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 4, i32 %d.051.1, i32 0
-  %arrayidx21.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 4, i32 %d.051.1, i32 0
-  %12 = bitcast i64* %arrayidx21.1 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %12, align 8
-  %arrayidx21.2.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 4, i32 %d.051.1, i32 2
-  %13 = bitcast i64* %arrayidx21.2.1 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %13, align 8
-  %arrayidx21.4.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 4, i32 %d.051.1, i32 4
-  %14 = bitcast i64* %arrayidx21.4.1 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %14, align 8
-  %arrayidx21.6.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 4, i32 %d.051.1, i32 6
-  %15 = bitcast i16* %arrayidx16.1 to <8 x i16>*
-  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, <8 x i16>* %15, align 2
-  %16 = bitcast i64* %arrayidx21.6.1 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %16, align 8
-  %arrayidx16.8.1 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 4, i32 %d.051.1, i32 8
-  %arrayidx21.8.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 4, i32 %d.051.1, i32 8
-  %17 = bitcast i64* %arrayidx21.8.1 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %17, align 8
-  %arrayidx21.10.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 4, i32 %d.051.1, i32 10
-  %18 = bitcast i64* %arrayidx21.10.1 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %18, align 8
-  %arrayidx21.12.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 4, i32 %d.051.1, i32 12
-  %19 = bitcast i64* %arrayidx21.12.1 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %19, align 8
-  %arrayidx21.14.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 4, i32 %d.051.1, i32 14
-  %20 = bitcast i16* %arrayidx16.8.1 to <8 x i16>*
-  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, <8 x i16>* %20, align 2
-  %21 = bitcast i64* %arrayidx21.14.1 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %21, align 8
-  %arrayidx16.16.1 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 4, i32 %d.051.1, i32 16
-  store i16 5, i16* %arrayidx16.16.1, align 2
-  %arrayidx21.16.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 4, i32 %d.051.1, i32 16
-  %arrayidx16.17.1 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 4, i32 %d.051.1, i32 17
-  store i16 5, i16* %arrayidx16.17.1, align 2
-  %22 = bitcast i64* %arrayidx21.16.1 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %22, align 8
-  %arrayidx16.18.1 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 4, i32 %d.051.1, i32 18
-  store i16 5, i16* %arrayidx16.18.1, align 2
-  %arrayidx21.18.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 4, i32 %d.051.1, i32 18
-  store i64 5, i64* %arrayidx21.18.1, align 8
+  %arrayidx16.1 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 4, i32 %d.051.1, i32 0
+  %arrayidx21.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 4, i32 %d.051.1, i32 0
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.1, align 8
+  %arrayidx21.2.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 4, i32 %d.051.1, i32 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.2.1, align 8
+  %arrayidx21.4.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 4, i32 %d.051.1, i32 4
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.4.1, align 8
+  %arrayidx21.6.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 4, i32 %d.051.1, i32 6
+  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, ptr %arrayidx16.1, align 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.6.1, align 8
+  %arrayidx16.8.1 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 4, i32 %d.051.1, i32 8
+  %arrayidx21.8.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 4, i32 %d.051.1, i32 8
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.8.1, align 8
+  %arrayidx21.10.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 4, i32 %d.051.1, i32 10
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.10.1, align 8
+  %arrayidx21.12.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 4, i32 %d.051.1, i32 12
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.12.1, align 8
+  %arrayidx21.14.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 4, i32 %d.051.1, i32 14
+  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, ptr %arrayidx16.8.1, align 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.14.1, align 8
+  %arrayidx16.16.1 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 4, i32 %d.051.1, i32 16
+  store i16 5, ptr %arrayidx16.16.1, align 2
+  %arrayidx21.16.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 4, i32 %d.051.1, i32 16
+  %arrayidx16.17.1 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 4, i32 %d.051.1, i32 17
+  store i16 5, ptr %arrayidx16.17.1, align 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.16.1, align 8
+  %arrayidx16.18.1 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 4, i32 %d.051.1, i32 18
+  store i16 5, ptr %arrayidx16.18.1, align 2
+  %arrayidx21.18.1 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 4, i32 %d.051.1, i32 18
+  store i64 5, ptr %arrayidx21.18.1, align 8
   %inc.1 = add nuw nsw i32 %d.051.1, 1
   %exitcond.not.1 = icmp eq i32 %inc.1, %add
   br i1 %exitcond.not.1, label %for.cond.cleanup6.1, label %for.cond8.preheader.1
 
 for.cond.cleanup6.1:                              ; preds = %for.cond8.preheader.1
-  call void @llvm.memset.p0i8.i32(i8* noundef nonnull align 2 dereferenceable(1) bitcast (i16* getelementptr inbounds ([17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_22, i32 0, i32 8, i32 0, i32 0) to i8*), i8 0, i32 %0, i1 false)
+  call void @llvm.memset.p0.i32(ptr noundef nonnull align 2 dereferenceable(1) getelementptr inbounds ([17 x [12 x [19 x i16]]], ptr @arr_22, i32 0, i32 8, i32 0, i32 0), i8 0, i32 %0, i1 false)
   br label %for.cond8.preheader.2
 
 for.cond8.preheader.2:                            ; preds = %for.cond8.preheader.2, %for.cond.cleanup6.1
   %d.051.2 = phi i32 [ 0, %for.cond.cleanup6.1 ], [ %inc.2, %for.cond8.preheader.2 ]
-  %arrayidx16.2 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 8, i32 %d.051.2, i32 0
-  %arrayidx21.254 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 8, i32 %d.051.2, i32 0
-  %23 = bitcast i64* %arrayidx21.254 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %23, align 8
-  %arrayidx21.2.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 8, i32 %d.051.2, i32 2
-  %24 = bitcast i64* %arrayidx21.2.2 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %24, align 8
-  %arrayidx21.4.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 8, i32 %d.051.2, i32 4
-  %25 = bitcast i64* %arrayidx21.4.2 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %25, align 8
-  %arrayidx21.6.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 8, i32 %d.051.2, i32 6
-  %26 = bitcast i16* %arrayidx16.2 to <8 x i16>*
-  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, <8 x i16>* %26, align 2
-  %27 = bitcast i64* %arrayidx21.6.2 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %27, align 8
-  %arrayidx16.8.2 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 8, i32 %d.051.2, i32 8
-  %arrayidx21.8.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 8, i32 %d.051.2, i32 8
-  %28 = bitcast i64* %arrayidx21.8.2 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %28, align 8
-  %arrayidx21.10.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 8, i32 %d.051.2, i32 10
-  %29 = bitcast i64* %arrayidx21.10.2 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %29, align 8
-  %arrayidx21.12.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 8, i32 %d.051.2, i32 12
-  %30 = bitcast i64* %arrayidx21.12.2 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %30, align 8
-  %arrayidx21.14.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 8, i32 %d.051.2, i32 14
-  %31 = bitcast i16* %arrayidx16.8.2 to <8 x i16>*
-  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, <8 x i16>* %31, align 2
-  %32 = bitcast i64* %arrayidx21.14.2 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %32, align 8
-  %arrayidx16.16.2 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 8, i32 %d.051.2, i32 16
-  store i16 5, i16* %arrayidx16.16.2, align 2
-  %arrayidx21.16.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 8, i32 %d.051.2, i32 16
-  %arrayidx16.17.2 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 8, i32 %d.051.2, i32 17
-  store i16 5, i16* %arrayidx16.17.2, align 2
-  %33 = bitcast i64* %arrayidx21.16.2 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %33, align 8
-  %arrayidx16.18.2 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 8, i32 %d.051.2, i32 18
-  store i16 5, i16* %arrayidx16.18.2, align 2
-  %arrayidx21.18.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 8, i32 %d.051.2, i32 18
-  store i64 5, i64* %arrayidx21.18.2, align 8
+  %arrayidx16.2 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 8, i32 %d.051.2, i32 0
+  %arrayidx21.254 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 8, i32 %d.051.2, i32 0
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.254, align 8
+  %arrayidx21.2.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 8, i32 %d.051.2, i32 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.2.2, align 8
+  %arrayidx21.4.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 8, i32 %d.051.2, i32 4
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.4.2, align 8
+  %arrayidx21.6.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 8, i32 %d.051.2, i32 6
+  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, ptr %arrayidx16.2, align 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.6.2, align 8
+  %arrayidx16.8.2 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 8, i32 %d.051.2, i32 8
+  %arrayidx21.8.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 8, i32 %d.051.2, i32 8
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.8.2, align 8
+  %arrayidx21.10.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 8, i32 %d.051.2, i32 10
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.10.2, align 8
+  %arrayidx21.12.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 8, i32 %d.051.2, i32 12
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.12.2, align 8
+  %arrayidx21.14.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 8, i32 %d.051.2, i32 14
+  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, ptr %arrayidx16.8.2, align 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.14.2, align 8
+  %arrayidx16.16.2 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 8, i32 %d.051.2, i32 16
+  store i16 5, ptr %arrayidx16.16.2, align 2
+  %arrayidx21.16.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 8, i32 %d.051.2, i32 16
+  %arrayidx16.17.2 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 8, i32 %d.051.2, i32 17
+  store i16 5, ptr %arrayidx16.17.2, align 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.16.2, align 8
+  %arrayidx16.18.2 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 8, i32 %d.051.2, i32 18
+  store i16 5, ptr %arrayidx16.18.2, align 2
+  %arrayidx21.18.2 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 8, i32 %d.051.2, i32 18
+  store i64 5, ptr %arrayidx21.18.2, align 8
   %inc.2 = add nuw nsw i32 %d.051.2, 1
   %exitcond.not.2 = icmp eq i32 %inc.2, %add
   br i1 %exitcond.not.2, label %for.cond.cleanup6.2, label %for.cond8.preheader.2
 
 for.cond.cleanup6.2:                              ; preds = %for.cond8.preheader.2
-  call void @llvm.memset.p0i8.i32(i8* noundef nonnull align 2 dereferenceable(1) bitcast (i16* getelementptr inbounds ([17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_22, i32 0, i32 12, i32 0, i32 0) to i8*), i8 0, i32 %0, i1 false)
+  call void @llvm.memset.p0.i32(ptr noundef nonnull align 2 dereferenceable(1) getelementptr inbounds ([17 x [12 x [19 x i16]]], ptr @arr_22, i32 0, i32 12, i32 0, i32 0), i8 0, i32 %0, i1 false)
   br label %for.cond8.preheader.3
 
 for.cond8.preheader.3:                            ; preds = %for.cond8.preheader.3, %for.cond.cleanup6.2
   %d.051.3 = phi i32 [ 0, %for.cond.cleanup6.2 ], [ %inc.3, %for.cond8.preheader.3 ]
-  %arrayidx16.3 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 12, i32 %d.051.3, i32 0
-  %arrayidx21.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 12, i32 %d.051.3, i32 0
-  %34 = bitcast i64* %arrayidx21.3 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %34, align 8
-  %arrayidx21.2.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 12, i32 %d.051.3, i32 2
-  %35 = bitcast i64* %arrayidx21.2.3 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %35, align 8
-  %arrayidx21.4.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 12, i32 %d.051.3, i32 4
-  %36 = bitcast i64* %arrayidx21.4.3 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %36, align 8
-  %arrayidx21.6.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 12, i32 %d.051.3, i32 6
-  %37 = bitcast i16* %arrayidx16.3 to <8 x i16>*
-  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, <8 x i16>* %37, align 2
-  %38 = bitcast i64* %arrayidx21.6.3 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %38, align 8
-  %arrayidx16.8.3 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 12, i32 %d.051.3, i32 8
-  %arrayidx21.8.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 12, i32 %d.051.3, i32 8
-  %39 = bitcast i64* %arrayidx21.8.3 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %39, align 8
-  %arrayidx21.10.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 12, i32 %d.051.3, i32 10
-  %40 = bitcast i64* %arrayidx21.10.3 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %40, align 8
-  %arrayidx21.12.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 12, i32 %d.051.3, i32 12
-  %41 = bitcast i64* %arrayidx21.12.3 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %41, align 8
-  %arrayidx21.14.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 12, i32 %d.051.3, i32 14
-  %42 = bitcast i16* %arrayidx16.8.3 to <8 x i16>*
-  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, <8 x i16>* %42, align 2
-  %43 = bitcast i64* %arrayidx21.14.3 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %43, align 8
-  %arrayidx16.16.3 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 12, i32 %d.051.3, i32 16
-  store i16 5, i16* %arrayidx16.16.3, align 2
-  %arrayidx21.16.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 12, i32 %d.051.3, i32 16
-  %arrayidx16.17.3 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 12, i32 %d.051.3, i32 17
-  store i16 5, i16* %arrayidx16.17.3, align 2
-  %44 = bitcast i64* %arrayidx21.16.3 to <2 x i64>*
-  store <2 x i64> <i64 5, i64 5>, <2 x i64>* %44, align 8
-  %arrayidx16.18.3 = getelementptr inbounds [17 x [12 x [19 x i16]]], [17 x [12 x [19 x i16]]]* @arr_21, i32 0, i32 12, i32 %d.051.3, i32 18
-  store i16 5, i16* %arrayidx16.18.3, align 2
-  %arrayidx21.18.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], [17 x [12 x [19 x i64]]]* @arr_20, i32 0, i32 12, i32 %d.051.3, i32 18
-  store i64 5, i64* %arrayidx21.18.3, align 8
+  %arrayidx16.3 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 12, i32 %d.051.3, i32 0
+  %arrayidx21.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 12, i32 %d.051.3, i32 0
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.3, align 8
+  %arrayidx21.2.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 12, i32 %d.051.3, i32 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.2.3, align 8
+  %arrayidx21.4.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 12, i32 %d.051.3, i32 4
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.4.3, align 8
+  %arrayidx21.6.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 12, i32 %d.051.3, i32 6
+  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, ptr %arrayidx16.3, align 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.6.3, align 8
+  %arrayidx16.8.3 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 12, i32 %d.051.3, i32 8
+  %arrayidx21.8.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 12, i32 %d.051.3, i32 8
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.8.3, align 8
+  %arrayidx21.10.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 12, i32 %d.051.3, i32 10
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.10.3, align 8
+  %arrayidx21.12.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 12, i32 %d.051.3, i32 12
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.12.3, align 8
+  %arrayidx21.14.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 12, i32 %d.051.3, i32 14
+  store <8 x i16> <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>, ptr %arrayidx16.8.3, align 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.14.3, align 8
+  %arrayidx16.16.3 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 12, i32 %d.051.3, i32 16
+  store i16 5, ptr %arrayidx16.16.3, align 2
+  %arrayidx21.16.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 12, i32 %d.051.3, i32 16
+  %arrayidx16.17.3 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 12, i32 %d.051.3, i32 17
+  store i16 5, ptr %arrayidx16.17.3, align 2
+  store <2 x i64> <i64 5, i64 5>, ptr %arrayidx21.16.3, align 8
+  %arrayidx16.18.3 = getelementptr inbounds [17 x [12 x [19 x i16]]], ptr @arr_21, i32 0, i32 12, i32 %d.051.3, i32 18
+  store i16 5, ptr %arrayidx16.18.3, align 2
+  %arrayidx21.18.3 = getelementptr inbounds [17 x [12 x [19 x i64]]], ptr @arr_20, i32 0, i32 12, i32 %d.051.3, i32 18
+  store i64 5, ptr %arrayidx21.18.3, align 8
   %inc.3 = add nuw nsw i32 %d.051.3, 1
   %exitcond.not.3 = icmp eq i32 %inc.3, %add
   br i1 %exitcond.not.3, label %for.cond.cleanup6.3, label %for.cond8.preheader.3

diff  --git a/llvm/test/CodeGen/Thumb2/mve-minmaxi.ll b/llvm/test/CodeGen/Thumb2/mve-minmaxi.ll
index 892be9a433073..19ec01bb74a46 100644
--- a/llvm/test/CodeGen/Thumb2/mve-minmaxi.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-minmaxi.ll
@@ -84,7 +84,7 @@ define arm_aapcs_vfpcc <16 x i8> @smax16i8(<16 x i8> %a, <16 x i8> %b) {
 
 declare <32 x i8> @llvm.smax.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
 
-define arm_aapcs_vfpcc void @smax32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+define arm_aapcs_vfpcc void @smax32i8(<32 x i8> %a, <32 x i8> %b, ptr %p) {
 ; CHECK-LABEL: smax32i8:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmax.s8 q1, q1, q3
@@ -93,7 +93,7 @@ define arm_aapcs_vfpcc void @smax32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p)
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
   %c = call <32 x i8> @llvm.smax.v32i8(<32 x i8> %a, <32 x i8> %b)
-  store <32 x i8> %c, <32 x i8>* %p
+  store <32 x i8> %c, ptr %p
   ret void
 }
 
@@ -123,7 +123,7 @@ define arm_aapcs_vfpcc <8 x i16> @smax8i16(<8 x i16> %a, <8 x i16> %b) {
 
 declare <16 x i16> @llvm.smax.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
 
-define arm_aapcs_vfpcc void @smax16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+define arm_aapcs_vfpcc void @smax16i16(<16 x i16> %a, <16 x i16> %b, ptr %p) {
 ; CHECK-LABEL: smax16i16:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmax.s16 q1, q1, q3
@@ -132,7 +132,7 @@ define arm_aapcs_vfpcc void @smax16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>*
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
   %c = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %a, <16 x i16> %b)
-  store <16 x i16> %c, <16 x i16>* %p
+  store <16 x i16> %c, ptr %p
   ret void
 }
 
@@ -184,7 +184,7 @@ define arm_aapcs_vfpcc <4 x i32> @smax4i32(<4 x i32> %a, <4 x i32> %b) {
 
 declare <8 x i32> @llvm.smax.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
 
-define arm_aapcs_vfpcc void @smax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+define arm_aapcs_vfpcc void @smax8i32(<8 x i32> %a, <8 x i32> %b, ptr %p) {
 ; CHECK-LABEL: smax8i32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmax.s32 q1, q1, q3
@@ -193,7 +193,7 @@ define arm_aapcs_vfpcc void @smax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p)
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
   %c = call <8 x i32>@llvm.smax.v8i32(<8 x i32> %a, <8 x i32> %b)
-  store <8 x i32> %c, <8 x i32>* %p
+  store <8 x i32> %c, ptr %p
   ret void
 }
 
@@ -246,7 +246,7 @@ define arm_aapcs_vfpcc <2 x i64> @smax2i64(<2 x i64> %a, <2 x i64> %b) {
 
 declare <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
 
-define arm_aapcs_vfpcc void @smax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+define arm_aapcs_vfpcc void @smax4i64(<4 x i64> %a, <4 x i64> %b, ptr %p) {
 ; CHECK-LABEL: smax4i64:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    .save {r4, lr}
@@ -285,7 +285,7 @@ define arm_aapcs_vfpcc void @smax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p)
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    pop {r4, pc}
   %c = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %b)
-  store <4 x i64> %c, <4 x i64>* %p
+  store <4 x i64> %c, ptr %p
   ret void
 }
 
@@ -372,7 +372,7 @@ define arm_aapcs_vfpcc <16 x i8> @umax16i8(<16 x i8> %a, <16 x i8> %b) {
 
 declare <32 x i8> @llvm.umax.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
 
-define arm_aapcs_vfpcc void @umax32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+define arm_aapcs_vfpcc void @umax32i8(<32 x i8> %a, <32 x i8> %b, ptr %p) {
 ; CHECK-LABEL: umax32i8:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmax.u8 q1, q1, q3
@@ -381,7 +381,7 @@ define arm_aapcs_vfpcc void @umax32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p)
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
   %c = call <32 x i8> @llvm.umax.v32i8(<32 x i8> %a, <32 x i8> %b)
-  store <32 x i8> %c, <32 x i8>* %p
+  store <32 x i8> %c, ptr %p
   ret void
 }
 
@@ -411,7 +411,7 @@ define arm_aapcs_vfpcc <8 x i16> @umax8i16(<8 x i16> %a, <8 x i16> %b) {
 
 declare <16 x i16> @llvm.umax.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
 
-define arm_aapcs_vfpcc void @umax16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+define arm_aapcs_vfpcc void @umax16i16(<16 x i16> %a, <16 x i16> %b, ptr %p) {
 ; CHECK-LABEL: umax16i16:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmax.u16 q1, q1, q3
@@ -420,7 +420,7 @@ define arm_aapcs_vfpcc void @umax16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>*
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
   %c = call <16 x i16> @llvm.umax.v16i16(<16 x i16> %a, <16 x i16> %b)
-  store <16 x i16> %c, <16 x i16>* %p
+  store <16 x i16> %c, ptr %p
   ret void
 }
 
@@ -465,7 +465,7 @@ define arm_aapcs_vfpcc <4 x i32> @umax4i32(<4 x i32> %a, <4 x i32> %b) {
 
 declare <8 x i32> @llvm.umax.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
 
-define arm_aapcs_vfpcc void @umax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+define arm_aapcs_vfpcc void @umax8i32(<8 x i32> %a, <8 x i32> %b, ptr %p) {
 ; CHECK-LABEL: umax8i32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmax.u32 q1, q1, q3
@@ -474,7 +474,7 @@ define arm_aapcs_vfpcc void @umax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p)
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
   %c = call <8 x i32>@llvm.umax.v8i32(<8 x i32> %a, <8 x i32> %b)
-  store <8 x i32> %c, <8 x i32>* %p
+  store <8 x i32> %c, ptr %p
   ret void
 }
 
@@ -527,7 +527,7 @@ define arm_aapcs_vfpcc <2 x i64> @umax2i64(<2 x i64> %a, <2 x i64> %b) {
 
 declare <4 x i64> @llvm.umax.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
 
-define arm_aapcs_vfpcc void @umax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+define arm_aapcs_vfpcc void @umax4i64(<4 x i64> %a, <4 x i64> %b, ptr %p) {
 ; CHECK-LABEL: umax4i64:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    .save {r4, lr}
@@ -566,7 +566,7 @@ define arm_aapcs_vfpcc void @umax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p)
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    pop {r4, pc}
   %c = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %a, <4 x i64> %b)
-  store <4 x i64> %c, <4 x i64>* %p
+  store <4 x i64> %c, ptr %p
   ret void
 }
 
@@ -653,7 +653,7 @@ define arm_aapcs_vfpcc <16 x i8> @smin16i8(<16 x i8> %a, <16 x i8> %b) {
 
 declare <32 x i8> @llvm.smin.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
 
-define arm_aapcs_vfpcc void @smin32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+define arm_aapcs_vfpcc void @smin32i8(<32 x i8> %a, <32 x i8> %b, ptr %p) {
 ; CHECK-LABEL: smin32i8:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmin.s8 q1, q1, q3
@@ -662,7 +662,7 @@ define arm_aapcs_vfpcc void @smin32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p)
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
   %c = call <32 x i8> @llvm.smin.v32i8(<32 x i8> %a, <32 x i8> %b)
-  store <32 x i8> %c, <32 x i8>* %p
+  store <32 x i8> %c, ptr %p
   ret void
 }
 
@@ -692,7 +692,7 @@ define arm_aapcs_vfpcc <8 x i16> @smin8i16(<8 x i16> %a, <8 x i16> %b) {
 
 declare <16 x i16> @llvm.smin.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
 
-define arm_aapcs_vfpcc void @smin16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+define arm_aapcs_vfpcc void @smin16i16(<16 x i16> %a, <16 x i16> %b, ptr %p) {
 ; CHECK-LABEL: smin16i16:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmin.s16 q1, q1, q3
@@ -701,7 +701,7 @@ define arm_aapcs_vfpcc void @smin16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>*
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
   %c = call <16 x i16> @llvm.smin.v16i16(<16 x i16> %a, <16 x i16> %b)
-  store <16 x i16> %c, <16 x i16>* %p
+  store <16 x i16> %c, ptr %p
   ret void
 }
 
@@ -753,7 +753,7 @@ define arm_aapcs_vfpcc <4 x i32> @smin4i32(<4 x i32> %a, <4 x i32> %b) {
 
 declare <8 x i32> @llvm.smin.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
 
-define arm_aapcs_vfpcc void @smin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+define arm_aapcs_vfpcc void @smin8i32(<8 x i32> %a, <8 x i32> %b, ptr %p) {
 ; CHECK-LABEL: smin8i32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmin.s32 q1, q1, q3
@@ -762,7 +762,7 @@ define arm_aapcs_vfpcc void @smin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p)
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
   %c = call <8 x i32>@llvm.smin.v8i32(<8 x i32> %a, <8 x i32> %b)
-  store <8 x i32> %c, <8 x i32>* %p
+  store <8 x i32> %c, ptr %p
   ret void
 }
 
@@ -815,7 +815,7 @@ define arm_aapcs_vfpcc <2 x i64> @smin2i64(<2 x i64> %a, <2 x i64> %b) {
 
 declare <4 x i64> @llvm.smin.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
 
-define arm_aapcs_vfpcc void @smin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+define arm_aapcs_vfpcc void @smin4i64(<4 x i64> %a, <4 x i64> %b, ptr %p) {
 ; CHECK-LABEL: smin4i64:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    .save {r4, lr}
@@ -854,7 +854,7 @@ define arm_aapcs_vfpcc void @smin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p)
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    pop {r4, pc}
   %c = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %a, <4 x i64> %b)
-  store <4 x i64> %c, <4 x i64>* %p
+  store <4 x i64> %c, ptr %p
   ret void
 }
 
@@ -941,7 +941,7 @@ define arm_aapcs_vfpcc <16 x i8> @umin16i8(<16 x i8> %a, <16 x i8> %b) {
 
 declare <32 x i8> @llvm.umin.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
 
-define arm_aapcs_vfpcc void @umin32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+define arm_aapcs_vfpcc void @umin32i8(<32 x i8> %a, <32 x i8> %b, ptr %p) {
 ; CHECK-LABEL: umin32i8:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmin.u8 q1, q1, q3
@@ -950,7 +950,7 @@ define arm_aapcs_vfpcc void @umin32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p)
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
   %c = call <32 x i8> @llvm.umin.v32i8(<32 x i8> %a, <32 x i8> %b)
-  store <32 x i8> %c, <32 x i8>* %p
+  store <32 x i8> %c, ptr %p
   ret void
 }
 
@@ -980,7 +980,7 @@ define arm_aapcs_vfpcc <8 x i16> @umin8i16(<8 x i16> %a, <8 x i16> %b) {
 
 declare <16 x i16> @llvm.umin.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
 
-define arm_aapcs_vfpcc void @umin16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+define arm_aapcs_vfpcc void @umin16i16(<16 x i16> %a, <16 x i16> %b, ptr %p) {
 ; CHECK-LABEL: umin16i16:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmin.u16 q1, q1, q3
@@ -989,7 +989,7 @@ define arm_aapcs_vfpcc void @umin16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>*
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
   %c = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %a, <16 x i16> %b)
-  store <16 x i16> %c, <16 x i16>* %p
+  store <16 x i16> %c, ptr %p
   ret void
 }
 
@@ -1034,7 +1034,7 @@ define arm_aapcs_vfpcc <4 x i32> @umin4i32(<4 x i32> %a, <4 x i32> %b) {
 
 declare <8 x i32> @llvm.umin.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
 
-define arm_aapcs_vfpcc void @umin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+define arm_aapcs_vfpcc void @umin8i32(<8 x i32> %a, <8 x i32> %b, ptr %p) {
 ; CHECK-LABEL: umin8i32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmin.u32 q1, q1, q3
@@ -1043,7 +1043,7 @@ define arm_aapcs_vfpcc void @umin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p)
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
   %c = call <8 x i32>@llvm.umin.v8i32(<8 x i32> %a, <8 x i32> %b)
-  store <8 x i32> %c, <8 x i32>* %p
+  store <8 x i32> %c, ptr %p
   ret void
 }
 
@@ -1096,7 +1096,7 @@ define arm_aapcs_vfpcc <2 x i64> @umin2i64(<2 x i64> %a, <2 x i64> %b) {
 
 declare <4 x i64> @llvm.umin.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
 
-define arm_aapcs_vfpcc void @umin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+define arm_aapcs_vfpcc void @umin4i64(<4 x i64> %a, <4 x i64> %b, ptr %p) {
 ; CHECK-LABEL: umin4i64:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    .save {r4, lr}
@@ -1135,6 +1135,6 @@ define arm_aapcs_vfpcc void @umin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p)
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    pop {r4, pc}
   %c = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %a, <4 x i64> %b)
-  store <4 x i64> %c, <4 x i64>* %p
+  store <4 x i64> %c, ptr %p
   ret void
 }

diff  --git a/llvm/test/CodeGen/Thumb2/mve-multivec-spill.ll b/llvm/test/CodeGen/Thumb2/mve-multivec-spill.ll
index cab8991d31b93..fe1d28412c34e 100644
--- a/llvm/test/CodeGen/Thumb2/mve-multivec-spill.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-multivec-spill.ll
@@ -4,7 +4,7 @@
 
 declare void @external_function()
 
-define arm_aapcs_vfpcc void @spill_multivector(<4 x i32>* %p) {
+define arm_aapcs_vfpcc void @spill_multivector(ptr %p) {
 ; CHECK-LABEL: spill_multivector:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -54,52 +54,51 @@ define arm_aapcs_vfpcc void @spill_multivector(<4 x i32>* %p) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
 entry:
-  %ip01 = bitcast <4 x i32>* %p to i32*
-  %v01 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vld2q.v4i32.v4i32.p0i32(i32* %ip01)
-  %ip23 = getelementptr i32, i32* %ip01, i32 16
-  %v23 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vld2q.v4i32.v4i32.p0i32(i32* %ip23)
-  %ip45 = getelementptr i32, i32* %ip23, i32 16
-  %v45 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vld2q.v4i32.v4i32.p0i32(i32* %ip45)
-  %ip67 = getelementptr i32, i32* %ip45, i32 16
-  %v67 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vld2q.v4i32.v4i32.p0i32(i32* %ip67)
-  %ip89 = getelementptr i32, i32* %ip67, i32 16
-  %v89 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vld2q.v4i32.v4i32.p0i32(i32* %ip89)
+  %v01 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vld2q.v4i32.v4i32.p0(ptr %p)
+  %ip23 = getelementptr i32, ptr %p, i32 16
+  %v23 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vld2q.v4i32.v4i32.p0(ptr %ip23)
+  %ip45 = getelementptr i32, ptr %ip23, i32 16
+  %v45 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vld2q.v4i32.v4i32.p0(ptr %ip45)
+  %ip67 = getelementptr i32, ptr %ip45, i32 16
+  %v67 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vld2q.v4i32.v4i32.p0(ptr %ip67)
+  %ip89 = getelementptr i32, ptr %ip67, i32 16
+  %v89 = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vld2q.v4i32.v4i32.p0(ptr %ip89)
   call void @external_function()
 
   %v0 = extractvalue { <4 x i32>, <4 x i32> } %v01, 0
   %v1 = extractvalue { <4 x i32>, <4 x i32> } %v01, 1
-  store <4 x i32> %v0, <4 x i32>* %p, align 4
-  %p1 = getelementptr <4 x i32>, <4 x i32>* %p, i32 1
-  store <4 x i32> %v1, <4 x i32>* %p1, align 4
+  store <4 x i32> %v0, ptr %p, align 4
+  %p1 = getelementptr <4 x i32>, ptr %p, i32 1
+  store <4 x i32> %v1, ptr %p1, align 4
 
   %v2 = extractvalue { <4 x i32>, <4 x i32> } %v23, 0
   %v3 = extractvalue { <4 x i32>, <4 x i32> } %v23, 1
-  %p2 = getelementptr <4 x i32>, <4 x i32>* %p, i32 2
-  store <4 x i32> %v2, <4 x i32>* %p2, align 4
-  %p3 = getelementptr <4 x i32>, <4 x i32>* %p, i32 3
-  store <4 x i32> %v3, <4 x i32>* %p3, align 4
+  %p2 = getelementptr <4 x i32>, ptr %p, i32 2
+  store <4 x i32> %v2, ptr %p2, align 4
+  %p3 = getelementptr <4 x i32>, ptr %p, i32 3
+  store <4 x i32> %v3, ptr %p3, align 4
 
   %v4 = extractvalue { <4 x i32>, <4 x i32> } %v45, 0
   %v5 = extractvalue { <4 x i32>, <4 x i32> } %v45, 1
-  %p4 = getelementptr <4 x i32>, <4 x i32>* %p, i32 4
-  store <4 x i32> %v4, <4 x i32>* %p4, align 4
-  %p5 = getelementptr <4 x i32>, <4 x i32>* %p, i32 5
-  store <4 x i32> %v5, <4 x i32>* %p5, align 4
+  %p4 = getelementptr <4 x i32>, ptr %p, i32 4
+  store <4 x i32> %v4, ptr %p4, align 4
+  %p5 = getelementptr <4 x i32>, ptr %p, i32 5
+  store <4 x i32> %v5, ptr %p5, align 4
 
   %v6 = extractvalue { <4 x i32>, <4 x i32> } %v67, 0
   %v7 = extractvalue { <4 x i32>, <4 x i32> } %v67, 1
-  %p6 = getelementptr <4 x i32>, <4 x i32>* %p, i32 6
-  store <4 x i32> %v6, <4 x i32>* %p6, align 4
-  %p7 = getelementptr <4 x i32>, <4 x i32>* %p, i32 7
-  store <4 x i32> %v7, <4 x i32>* %p7, align 4
+  %p6 = getelementptr <4 x i32>, ptr %p, i32 6
+  store <4 x i32> %v6, ptr %p6, align 4
+  %p7 = getelementptr <4 x i32>, ptr %p, i32 7
+  store <4 x i32> %v7, ptr %p7, align 4
 
   %v8 = extractvalue { <4 x i32>, <4 x i32> } %v89, 0
   %v9 = extractvalue { <4 x i32>, <4 x i32> } %v89, 1
-  %p8 = getelementptr <4 x i32>, <4 x i32>* %p, i32 8
-  store <4 x i32> %v8, <4 x i32>* %p8, align 4
-  %p9 = getelementptr <4 x i32>, <4 x i32>* %p, i32 9
-  store <4 x i32> %v9, <4 x i32>* %p9, align 4
+  %p8 = getelementptr <4 x i32>, ptr %p, i32 8
+  store <4 x i32> %v8, ptr %p8, align 4
+  %p9 = getelementptr <4 x i32>, ptr %p, i32 9
+  store <4 x i32> %v9, ptr %p9, align 4
   ret void
 }
 
-declare { <4 x i32>, <4 x i32> } @llvm.arm.mve.vld2q.v4i32.v4i32.p0i32(i32*)
+declare { <4 x i32>, <4 x i32> } @llvm.arm.mve.vld2q.v4i32.v4i32.p0(ptr)

diff  --git a/llvm/test/CodeGen/Thumb2/mve-nounrolledremainder.ll b/llvm/test/CodeGen/Thumb2/mve-nounrolledremainder.ll
index 71bdd6b0d86b6..491c800c5c14b 100644
--- a/llvm/test/CodeGen/Thumb2/mve-nounrolledremainder.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-nounrolledremainder.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp -o - %s | FileCheck --check-prefix=CHECK %s
 
-define void @tailpred(half* nocapture readonly %pSrcA, half* nocapture readonly %pSrcB, half* nocapture %pDst, i32 %blockSize) {
+define void @tailpred(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr nocapture %pDst, i32 %blockSize) {
 ; CHECK-LABEL: tailpred:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -53,14 +53,14 @@ entry:
   br i1 %cmp.not6, label %while.end, label %vector.memcheck
 
 vector.memcheck:                                  ; preds = %entry
-  %scevgep = getelementptr half, half* %pDst, i32 %blockSize
-  %scevgep14 = getelementptr half, half* %pSrcA, i32 %blockSize
-  %scevgep17 = getelementptr half, half* %pSrcB, i32 %blockSize
-  %bound0 = icmp ugt half* %scevgep14, %pDst
-  %bound1 = icmp ugt half* %scevgep, %pSrcA
+  %scevgep = getelementptr half, ptr %pDst, i32 %blockSize
+  %scevgep14 = getelementptr half, ptr %pSrcA, i32 %blockSize
+  %scevgep17 = getelementptr half, ptr %pSrcB, i32 %blockSize
+  %bound0 = icmp ugt ptr %scevgep14, %pDst
+  %bound1 = icmp ugt ptr %scevgep, %pSrcA
   %found.conflict = and i1 %bound0, %bound1
-  %bound019 = icmp ugt half* %scevgep17, %pDst
-  %bound120 = icmp ugt half* %scevgep, %pSrcB
+  %bound019 = icmp ugt ptr %scevgep17, %pDst
+  %bound120 = icmp ugt ptr %scevgep, %pSrcB
   %found.conflict21 = and i1 %bound019, %bound120
   %conflict.rdx = or i1 %found.conflict, %found.conflict21
   br i1 %conflict.rdx, label %while.body, label %vector.ph
@@ -72,33 +72,30 @@ vector.ph:                                        ; preds = %vector.memcheck
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr half, half* %pSrcA, i32 %index
-  %next.gep28 = getelementptr half, half* %pDst, i32 %index
-  %next.gep29 = getelementptr half, half* %pSrcB, i32 %index
+  %next.gep = getelementptr half, ptr %pSrcA, i32 %index
+  %next.gep28 = getelementptr half, ptr %pDst, i32 %index
+  %next.gep29 = getelementptr half, ptr %pSrcB, i32 %index
   %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %blockSize)
-  %0 = bitcast half* %next.gep to <8 x half>*
-  %wide.masked.load = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 2, <8 x i1> %active.lane.mask, <8 x half> undef)
-  %1 = bitcast half* %next.gep29 to <8 x half>*
-  %wide.masked.load32 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x half> undef)
-  %2 = fadd fast <8 x half> %wide.masked.load32, %wide.masked.load
-  %3 = bitcast half* %next.gep28 to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %2, <8 x half>* %3, i32 2, <8 x i1> %active.lane.mask)
+  %wide.masked.load = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %next.gep, i32 2, <8 x i1> %active.lane.mask, <8 x half> undef)
+  %wide.masked.load32 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %next.gep29, i32 2, <8 x i1> %active.lane.mask, <8 x half> undef)
+  %0 = fadd fast <8 x half> %wide.masked.load32, %wide.masked.load
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %0, ptr %next.gep28, i32 2, <8 x i1> %active.lane.mask)
   %index.next = add i32 %index, 8
-  %4 = icmp eq i32 %index.next, %n.vec
-  br i1 %4, label %while.end, label %vector.body
+  %1 = icmp eq i32 %index.next, %n.vec
+  br i1 %1, label %while.end, label %vector.body
 
 while.body:                                       ; preds = %vector.memcheck, %while.body
   %blkCnt.010 = phi i32 [ %dec, %while.body ], [ %blockSize, %vector.memcheck ]
-  %pSrcA.addr.09 = phi half* [ %incdec.ptr, %while.body ], [ %pSrcA, %vector.memcheck ]
-  %pDst.addr.08 = phi half* [ %incdec.ptr3, %while.body ], [ %pDst, %vector.memcheck ]
-  %pSrcB.addr.07 = phi half* [ %incdec.ptr1, %while.body ], [ %pSrcB, %vector.memcheck ]
-  %incdec.ptr = getelementptr inbounds half, half* %pSrcA.addr.09, i32 1
-  %5 = load half, half* %pSrcA.addr.09, align 2
-  %incdec.ptr1 = getelementptr inbounds half, half* %pSrcB.addr.07, i32 1
-  %6 = load half, half* %pSrcB.addr.07, align 2
-  %7 = fadd fast half %6, %5
-  %incdec.ptr3 = getelementptr inbounds half, half* %pDst.addr.08, i32 1
-  store half %7, half* %pDst.addr.08, align 2
+  %pSrcA.addr.09 = phi ptr [ %incdec.ptr, %while.body ], [ %pSrcA, %vector.memcheck ]
+  %pDst.addr.08 = phi ptr [ %incdec.ptr3, %while.body ], [ %pDst, %vector.memcheck ]
+  %pSrcB.addr.07 = phi ptr [ %incdec.ptr1, %while.body ], [ %pSrcB, %vector.memcheck ]
+  %incdec.ptr = getelementptr inbounds half, ptr %pSrcA.addr.09, i32 1
+  %2 = load half, ptr %pSrcA.addr.09, align 2
+  %incdec.ptr1 = getelementptr inbounds half, ptr %pSrcB.addr.07, i32 1
+  %3 = load half, ptr %pSrcB.addr.07, align 2
+  %4 = fadd fast half %3, %2
+  %incdec.ptr3 = getelementptr inbounds half, ptr %pDst.addr.08, i32 1
+  store half %4, ptr %pDst.addr.08, align 2
   %dec = add i32 %blkCnt.010, -1
   %cmp.not = icmp eq i32 %dec, 0
   br i1 %cmp.not, label %while.end, label %while.body
@@ -107,7 +104,7 @@ while.end:                                        ; preds = %vector.body, %while
   ret void
 }
 
-define void @notailpred(half* nocapture readonly %pSrcA, half* nocapture readonly %pSrcB, half* nocapture %pDst, i32 %blockSize) {
+define void @notailpred(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr nocapture %pDst, i32 %blockSize) {
 ; CHECK-LABEL: notailpred:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -180,14 +177,14 @@ while.body.preheader:                             ; preds = %entry
   br i1 %min.iters.check, label %while.body.preheader31, label %vector.memcheck
 
 vector.memcheck:                                  ; preds = %while.body.preheader
-  %scevgep = getelementptr half, half* %pDst, i32 %blockSize
-  %scevgep14 = getelementptr half, half* %pSrcA, i32 %blockSize
-  %scevgep17 = getelementptr half, half* %pSrcB, i32 %blockSize
-  %bound0 = icmp ugt half* %scevgep14, %pDst
-  %bound1 = icmp ugt half* %scevgep, %pSrcA
+  %scevgep = getelementptr half, ptr %pDst, i32 %blockSize
+  %scevgep14 = getelementptr half, ptr %pSrcA, i32 %blockSize
+  %scevgep17 = getelementptr half, ptr %pSrcB, i32 %blockSize
+  %bound0 = icmp ugt ptr %scevgep14, %pDst
+  %bound1 = icmp ugt ptr %scevgep, %pSrcA
   %found.conflict = and i1 %bound0, %bound1
-  %bound019 = icmp ugt half* %scevgep17, %pDst
-  %bound120 = icmp ugt half* %scevgep, %pSrcB
+  %bound019 = icmp ugt ptr %scevgep17, %pDst
+  %bound120 = icmp ugt ptr %scevgep, %pSrcB
   %found.conflict21 = and i1 %bound019, %bound120
   %conflict.rdx = or i1 %found.conflict, %found.conflict21
   br i1 %conflict.rdx, label %while.body.preheader31, label %vector.ph
@@ -195,26 +192,23 @@ vector.memcheck:                                  ; preds = %while.body.preheade
 vector.ph:                                        ; preds = %vector.memcheck
   %n.vec = and i32 %blockSize, -8
   %ind.end = and i32 %blockSize, 7
-  %ind.end23 = getelementptr half, half* %pSrcA, i32 %n.vec
-  %ind.end25 = getelementptr half, half* %pDst, i32 %n.vec
-  %ind.end27 = getelementptr half, half* %pSrcB, i32 %n.vec
+  %ind.end23 = getelementptr half, ptr %pSrcA, i32 %n.vec
+  %ind.end25 = getelementptr half, ptr %pDst, i32 %n.vec
+  %ind.end27 = getelementptr half, ptr %pSrcB, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr half, half* %pSrcA, i32 %index
-  %next.gep28 = getelementptr half, half* %pDst, i32 %index
-  %next.gep29 = getelementptr half, half* %pSrcB, i32 %index
-  %0 = bitcast half* %next.gep to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %0, align 2
-  %1 = bitcast half* %next.gep29 to <8 x half>*
-  %wide.load30 = load <8 x half>, <8 x half>* %1, align 2
-  %2 = fadd fast <8 x half> %wide.load30, %wide.load
-  %3 = bitcast half* %next.gep28 to <8 x half>*
-  store <8 x half> %2, <8 x half>* %3, align 2
+  %next.gep = getelementptr half, ptr %pSrcA, i32 %index
+  %next.gep28 = getelementptr half, ptr %pDst, i32 %index
+  %next.gep29 = getelementptr half, ptr %pSrcB, i32 %index
+  %wide.load = load <8 x half>, ptr %next.gep, align 2
+  %wide.load30 = load <8 x half>, ptr %next.gep29, align 2
+  %0 = fadd fast <8 x half> %wide.load30, %wide.load
+  store <8 x half> %0, ptr %next.gep28, align 2
   %index.next = add i32 %index, 8
-  %4 = icmp eq i32 %index.next, %n.vec
-  br i1 %4, label %middle.block, label %vector.body
+  %1 = icmp eq i32 %index.next, %n.vec
+  br i1 %1, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %blockSize
@@ -222,23 +216,23 @@ middle.block:                                     ; preds = %vector.body
 
 while.body.preheader31:                           ; preds = %middle.block, %vector.memcheck, %while.body.preheader
   %blkCnt.010.ph = phi i32 [ %blockSize, %vector.memcheck ], [ %blockSize, %while.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcA.addr.09.ph = phi half* [ %pSrcA, %vector.memcheck ], [ %pSrcA, %while.body.preheader ], [ %ind.end23, %middle.block ]
-  %pDst.addr.08.ph = phi half* [ %pDst, %vector.memcheck ], [ %pDst, %while.body.preheader ], [ %ind.end25, %middle.block ]
-  %pSrcB.addr.07.ph = phi half* [ %pSrcB, %vector.memcheck ], [ %pSrcB, %while.body.preheader ], [ %ind.end27, %middle.block ]
+  %pSrcA.addr.09.ph = phi ptr [ %pSrcA, %vector.memcheck ], [ %pSrcA, %while.body.preheader ], [ %ind.end23, %middle.block ]
+  %pDst.addr.08.ph = phi ptr [ %pDst, %vector.memcheck ], [ %pDst, %while.body.preheader ], [ %ind.end25, %middle.block ]
+  %pSrcB.addr.07.ph = phi ptr [ %pSrcB, %vector.memcheck ], [ %pSrcB, %while.body.preheader ], [ %ind.end27, %middle.block ]
   br label %while.body
 
 while.body:                                       ; preds = %while.body.preheader31, %while.body
   %blkCnt.010 = phi i32 [ %dec, %while.body ], [ %blkCnt.010.ph, %while.body.preheader31 ]
-  %pSrcA.addr.09 = phi half* [ %incdec.ptr, %while.body ], [ %pSrcA.addr.09.ph, %while.body.preheader31 ]
-  %pDst.addr.08 = phi half* [ %incdec.ptr3, %while.body ], [ %pDst.addr.08.ph, %while.body.preheader31 ]
-  %pSrcB.addr.07 = phi half* [ %incdec.ptr1, %while.body ], [ %pSrcB.addr.07.ph, %while.body.preheader31 ]
-  %incdec.ptr = getelementptr inbounds half, half* %pSrcA.addr.09, i32 1
-  %5 = load half, half* %pSrcA.addr.09, align 2
-  %incdec.ptr1 = getelementptr inbounds half, half* %pSrcB.addr.07, i32 1
-  %6 = load half, half* %pSrcB.addr.07, align 2
-  %7 = fadd fast half %6, %5
-  %incdec.ptr3 = getelementptr inbounds half, half* %pDst.addr.08, i32 1
-  store half %7, half* %pDst.addr.08, align 2
+  %pSrcA.addr.09 = phi ptr [ %incdec.ptr, %while.body ], [ %pSrcA.addr.09.ph, %while.body.preheader31 ]
+  %pDst.addr.08 = phi ptr [ %incdec.ptr3, %while.body ], [ %pDst.addr.08.ph, %while.body.preheader31 ]
+  %pSrcB.addr.07 = phi ptr [ %incdec.ptr1, %while.body ], [ %pSrcB.addr.07.ph, %while.body.preheader31 ]
+  %incdec.ptr = getelementptr inbounds half, ptr %pSrcA.addr.09, i32 1
+  %2 = load half, ptr %pSrcA.addr.09, align 2
+  %incdec.ptr1 = getelementptr inbounds half, ptr %pSrcB.addr.07, i32 1
+  %3 = load half, ptr %pSrcB.addr.07, align 2
+  %4 = fadd fast half %3, %2
+  %incdec.ptr3 = getelementptr inbounds half, ptr %pDst.addr.08, i32 1
+  store half %4, ptr %pDst.addr.08, align 2
   %dec = add i32 %blkCnt.010, -1
   %cmp.not = icmp eq i32 %dec, 0
   br i1 %cmp.not, label %while.end, label %while.body
@@ -248,5 +242,5 @@ while.end:                                        ; preds = %while.body, %middle
 }
 
 declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32) #1
-declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32 immarg, <8 x i1>, <8 x half>) #2
-declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32 immarg, <8 x i1>) #3
+declare <8 x half> @llvm.masked.load.v8f16.p0(ptr, i32 immarg, <8 x i1>, <8 x half>) #2
+declare void @llvm.masked.store.v8f16.p0(<8 x half>, ptr, i32 immarg, <8 x i1>) #3

diff  --git a/llvm/test/CodeGen/Thumb2/mve-pred-constfold.ll b/llvm/test/CodeGen/Thumb2/mve-pred-constfold.ll
index 85760e1a5292e..879a49f353de3 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-constfold.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-constfold.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc void @reg(<8 x i16> %acc0, <8 x i16> %acc1, i32* nocapture %px, i16 signext %p0) {
+define arm_aapcs_vfpcc void @reg(<8 x i16> %acc0, <8 x i16> %acc1, ptr nocapture %px, i16 signext %p0) {
 ; CHECK-LABEL: reg:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r6, r7, lr}
@@ -23,18 +23,18 @@ entry:
   %3 = tail call i32 @llvm.arm.mve.addv.predicated.v8i16.v8i1(<8 x i16> %acc0, i32 0, <8 x i1> %2)
   %4 = tail call i32 @llvm.arm.mve.addv.predicated.v8i16.v8i1(<8 x i16> %acc1, i32 0, <8 x i1> %0)
   %5 = tail call i32 @llvm.arm.mve.addv.predicated.v8i16.v8i1(<8 x i16> %acc1, i32 0, <8 x i1> %2)
-  store i32 %1, i32* %px, align 4
-  %arrayidx1 = getelementptr inbounds i32, i32* %px, i32 1
-  store i32 %3, i32* %arrayidx1, align 4
-  %arrayidx2 = getelementptr inbounds i32, i32* %px, i32 2
-  store i32 %4, i32* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds i32, i32* %px, i32 3
-  store i32 %5, i32* %arrayidx3, align 4
+  store i32 %1, ptr %px, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %px, i32 1
+  store i32 %3, ptr %arrayidx1, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %px, i32 2
+  store i32 %4, ptr %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds i32, ptr %px, i32 3
+  store i32 %5, ptr %arrayidx3, align 4
   ret void
 }
 
 
-define arm_aapcs_vfpcc void @const(<8 x i16> %acc0, <8 x i16> %acc1, i32* nocapture %px, i16 signext %p0) {
+define arm_aapcs_vfpcc void @const(<8 x i16> %acc0, <8 x i16> %acc1, ptr nocapture %px, i16 signext %p0) {
 ; CHECK-LABEL: const:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r6, r7, lr}
@@ -58,13 +58,13 @@ entry:
   %6 = tail call i32 @llvm.arm.mve.addv.predicated.v8i16.v8i1(<8 x i16> %acc0, i32 0, <8 x i1> %5)
   %7 = tail call i32 @llvm.arm.mve.addv.predicated.v8i16.v8i1(<8 x i16> %acc1, i32 0, <8 x i1> %1)
   %8 = tail call i32 @llvm.arm.mve.addv.predicated.v8i16.v8i1(<8 x i16> %acc1, i32 0, <8 x i1> %5)
-  store i32 %2, i32* %px, align 4
-  %arrayidx1 = getelementptr inbounds i32, i32* %px, i32 1
-  store i32 %6, i32* %arrayidx1, align 4
-  %arrayidx2 = getelementptr inbounds i32, i32* %px, i32 2
-  store i32 %7, i32* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds i32, i32* %px, i32 3
-  store i32 %8, i32* %arrayidx3, align 4
+  store i32 %2, ptr %px, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %px, i32 1
+  store i32 %6, ptr %arrayidx1, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %px, i32 2
+  store i32 %7, ptr %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds i32, ptr %px, i32 3
+  store i32 %8, ptr %arrayidx3, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Thumb2/mve-pred-convert.ll b/llvm/test/CodeGen/Thumb2/mve-pred-convert.ll
index c24f6e4642580..170af75df2ae5 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-convert.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-convert.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
 
-define void @g(i8* %v) {
+define void @g(ptr %v) {
 ; CHECK-LABEL: g:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    movs r0, #63
@@ -11,16 +11,16 @@ define void @g(i8* %v) {
 ; CHECK-NEXT:    vstrbt.8 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i8, i8* %v, align 1
+  %0 = load i8, ptr %v, align 1
   %conv = zext i8 %0 to i32
   %broadcast.splatinsert = insertelement <16 x i32> undef, i32 %conv, i32 0
   %broadcast.splat = shufflevector <16 x i32> %broadcast.splatinsert, <16 x i32> undef, <16 x i32> zeroinitializer
   %1 = and <16 x i32> %broadcast.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768>
   %2 = icmp eq <16 x i32> %1, zeroinitializer
   %3 = select <16 x i1> %2, <16 x i8> zeroinitializer, <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %3, <16 x i8>* undef, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>)
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %3, ptr undef, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32 immarg, <16 x i1>) #1
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32 immarg, <16 x i1>) #1

diff  --git a/llvm/test/CodeGen/Thumb2/mve-pred-loadstore.ll b/llvm/test/CodeGen/Thumb2/mve-pred-loadstore.ll
index e39614efd680b..a92adf6f1a067 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-loadstore.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-loadstore.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-LE
 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-BE
 
-define arm_aapcs_vfpcc <4 x i32> @load_v4i1(<4 x i1> *%src, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @load_v4i1(ptr %src, <4 x i32> %a) {
 ; CHECK-LE-LABEL: load_v4i1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    ldrb r0, [r0]
@@ -43,12 +43,12 @@ define arm_aapcs_vfpcc <4 x i32> @load_v4i1(<4 x i1> *%src, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vrev64.32 q0, q1
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %c = load <4 x i1>, <4 x i1>* %src
+  %c = load <4 x i1>, ptr %src
   %s = select <4 x i1> %c, <4 x i32> %a, <4 x i32> zeroinitializer
   ret <4 x i32> %s
 }
 
-define arm_aapcs_vfpcc <8 x i16> @load_v8i1(<8 x i1> *%src, <8 x i16> %a) {
+define arm_aapcs_vfpcc <8 x i16> @load_v8i1(ptr %src, <8 x i16> %a) {
 ; CHECK-LE-LABEL: load_v8i1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    ldrb r0, [r0]
@@ -110,12 +110,12 @@ define arm_aapcs_vfpcc <8 x i16> @load_v8i1(<8 x i1> *%src, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    vrev64.16 q0, q1
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %c = load <8 x i1>, <8 x i1>* %src
+  %c = load <8 x i1>, ptr %src
   %s = select <8 x i1> %c, <8 x i16> %a, <8 x i16> zeroinitializer
   ret <8 x i16> %s
 }
 
-define arm_aapcs_vfpcc <16 x i8> @load_v16i1(<16 x i1> *%src, <16 x i8> %a) {
+define arm_aapcs_vfpcc <16 x i8> @load_v16i1(ptr %src, <16 x i8> %a) {
 ; CHECK-LE-LABEL: load_v16i1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    ldrh r0, [r0]
@@ -137,12 +137,12 @@ define arm_aapcs_vfpcc <16 x i8> @load_v16i1(<16 x i1> *%src, <16 x i8> %a) {
 ; CHECK-BE-NEXT:    vrev64.8 q0, q1
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %c = load <16 x i1>, <16 x i1>* %src
+  %c = load <16 x i1>, ptr %src
   %s = select <16 x i1> %c, <16 x i8> %a, <16 x i8> zeroinitializer
   ret <16 x i8> %s
 }
 
-define arm_aapcs_vfpcc <2 x i64> @load_v2i1(<2 x i1> *%src, <2 x i64> %a) {
+define arm_aapcs_vfpcc <2 x i64> @load_v2i1(ptr %src, <2 x i64> %a) {
 ; CHECK-LE-LABEL: load_v2i1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    ldrb r0, [r0]
@@ -177,13 +177,13 @@ define arm_aapcs_vfpcc <2 x i64> @load_v2i1(<2 x i1> *%src, <2 x i64> %a) {
 ; CHECK-BE-NEXT:    vpsel q0, q0, q1
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %c = load <2 x i1>, <2 x i1>* %src
+  %c = load <2 x i1>, ptr %src
   %s = select <2 x i1> %c, <2 x i64> %a, <2 x i64> zeroinitializer
   ret <2 x i64> %s
 }
 
 
-define arm_aapcs_vfpcc void @store_v4i1(<4 x i1> *%dst, <4 x i32> %a) {
+define arm_aapcs_vfpcc void @store_v4i1(ptr %dst, <4 x i32> %a) {
 ; CHECK-LE-LABEL: store_v4i1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vcmp.i32 eq, q0, zr
@@ -226,11 +226,11 @@ define arm_aapcs_vfpcc void @store_v4i1(<4 x i1> *%dst, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp eq <4 x i32> %a, zeroinitializer
-  store <4 x i1> %c, <4 x i1>* %dst
+  store <4 x i1> %c, ptr %dst
   ret void
 }
 
-define arm_aapcs_vfpcc void @store_v8i1(<8 x i1> *%dst, <8 x i16> %a) {
+define arm_aapcs_vfpcc void @store_v8i1(ptr %dst, <8 x i16> %a) {
 ; CHECK-LE-LABEL: store_v8i1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vcmp.i16 eq, q0, zr
@@ -297,11 +297,11 @@ define arm_aapcs_vfpcc void @store_v8i1(<8 x i1> *%dst, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp eq <8 x i16> %a, zeroinitializer
-  store <8 x i1> %c, <8 x i1>* %dst
+  store <8 x i1> %c, ptr %dst
   ret void
 }
 
-define arm_aapcs_vfpcc void @store_v16i1(<16 x i1> *%dst, <16 x i8> %a) {
+define arm_aapcs_vfpcc void @store_v16i1(ptr %dst, <16 x i8> %a) {
 ; CHECK-LE-LABEL: store_v16i1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vcmp.i8 eq, q0, zr
@@ -320,11 +320,11 @@ define arm_aapcs_vfpcc void @store_v16i1(<16 x i1> *%dst, <16 x i8> %a) {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp eq <16 x i8> %a, zeroinitializer
-  store <16 x i1> %c, <16 x i1>* %dst
+  store <16 x i1> %c, ptr %dst
   ret void
 }
 
-define arm_aapcs_vfpcc void @store_v2i1(<2 x i1> *%dst, <2 x i64> %a) {
+define arm_aapcs_vfpcc void @store_v2i1(ptr %dst, <2 x i64> %a) {
 ; CHECK-LE-LABEL: store_v2i1:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vmov r1, r2, d0
@@ -355,11 +355,11 @@ define arm_aapcs_vfpcc void @store_v2i1(<2 x i1> *%dst, <2 x i64> %a) {
 ; CHECK-BE-NEXT:    bx lr
 entry:
   %c = icmp eq <2 x i64> %a, zeroinitializer
-  store <2 x i1> %c, <2 x i1>* %dst
+  store <2 x i1> %c, ptr %dst
   ret void
 }
 
-define arm_aapcs_vfpcc <4 x i32> @load_predcastzext(i16* %i, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @load_predcastzext(ptr %i, <4 x i32> %a) {
 ; CHECK-LE-LABEL: load_predcastzext:
 ; CHECK-LE:       @ %bb.0:
 ; CHECK-LE-NEXT:    ldrh r0, [r0]
@@ -377,14 +377,14 @@ define arm_aapcs_vfpcc <4 x i32> @load_predcastzext(i16* %i, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vpsel q1, q1, q0
 ; CHECK-BE-NEXT:    vrev64.32 q0, q1
 ; CHECK-BE-NEXT:    bx lr
-  %l = load i16, i16* %i, align 4
+  %l = load i16, ptr %i, align 4
   %lz = zext i16 %l to i32
   %c = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %lz)
   %s = select <4 x i1> %c, <4 x i32> %a, <4 x i32> zeroinitializer
   ret <4 x i32> %s
 }
 
-define arm_aapcs_vfpcc <4 x i32> @load_bc4(i32* %i, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @load_bc4(ptr %i, <4 x i32> %a) {
 ; CHECK-LE-LABEL: load_bc4:
 ; CHECK-LE:       @ %bb.0:
 ; CHECK-LE-NEXT:    vldr p0, [r0]
@@ -400,13 +400,13 @@ define arm_aapcs_vfpcc <4 x i32> @load_bc4(i32* %i, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vpsel q1, q1, q0
 ; CHECK-BE-NEXT:    vrev64.32 q0, q1
 ; CHECK-BE-NEXT:    bx lr
-  %l = load i32, i32* %i, align 4
+  %l = load i32, ptr %i, align 4
   %c = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %l)
   %s = select <4 x i1> %c, <4 x i32> %a, <4 x i32> zeroinitializer
   ret <4 x i32> %s
 }
 
-define arm_aapcs_vfpcc <8 x i16> @load_predcast8(i32* %i, <8 x i16> %a) {
+define arm_aapcs_vfpcc <8 x i16> @load_predcast8(ptr %i, <8 x i16> %a) {
 ; CHECK-LE-LABEL: load_predcast8:
 ; CHECK-LE:       @ %bb.0:
 ; CHECK-LE-NEXT:    vldr p0, [r0]
@@ -423,13 +423,13 @@ define arm_aapcs_vfpcc <8 x i16> @load_predcast8(i32* %i, <8 x i16> %a) {
 ; CHECK-BE-NEXT:    vpsel q1, q1, q0
 ; CHECK-BE-NEXT:    vrev64.16 q0, q1
 ; CHECK-BE-NEXT:    bx lr
-  %l = load i32, i32* %i, align 4
+  %l = load i32, ptr %i, align 4
   %c = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %l)
   %s = select <8 x i1> %c, <8 x i16> %a, <8 x i16> zeroinitializer
   ret <8 x i16> %s
 }
 
-define arm_aapcs_vfpcc <16 x i8> @load_predcast16(i32* %i, <16 x i8> %a) {
+define arm_aapcs_vfpcc <16 x i8> @load_predcast16(ptr %i, <16 x i8> %a) {
 ; CHECK-LE-LABEL: load_predcast16:
 ; CHECK-LE:       @ %bb.0:
 ; CHECK-LE-NEXT:    vldr p0, [r0]
@@ -446,13 +446,13 @@ define arm_aapcs_vfpcc <16 x i8> @load_predcast16(i32* %i, <16 x i8> %a) {
 ; CHECK-BE-NEXT:    vpsel q1, q1, q0
 ; CHECK-BE-NEXT:    vrev64.8 q0, q1
 ; CHECK-BE-NEXT:    bx lr
-  %l = load i32, i32* %i, align 4
+  %l = load i32, ptr %i, align 4
   %c = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %l)
   %s = select <16 x i1> %c, <16 x i8> %a, <16 x i8> zeroinitializer
   ret <16 x i8> %s
 }
 
-define arm_aapcs_vfpcc <4 x i32> @load_bc4_align2(i32* %i, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @load_bc4_align2(ptr %i, <4 x i32> %a) {
 ; CHECK-LE-LABEL: load_bc4_align2:
 ; CHECK-LE:       @ %bb.0:
 ; CHECK-LE-NEXT:    ldr r0, [r0]
@@ -470,13 +470,13 @@ define arm_aapcs_vfpcc <4 x i32> @load_bc4_align2(i32* %i, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vpsel q1, q1, q0
 ; CHECK-BE-NEXT:    vrev64.32 q0, q1
 ; CHECK-BE-NEXT:    bx lr
-  %l = load i32, i32* %i, align 2
+  %l = load i32, ptr %i, align 2
   %c = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %l)
   %s = select <4 x i1> %c, <4 x i32> %a, <4 x i32> zeroinitializer
   ret <4 x i32> %s
 }
 
-define arm_aapcs_vfpcc <4 x i32> @load_bc4_offset(i16* %i, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @load_bc4_offset(ptr %i, <4 x i32> %a) {
 ; CHECK-LE-LABEL: load_bc4_offset:
 ; CHECK-LE:       @ %bb.0:
 ; CHECK-LE-NEXT:    adds r0, #6
@@ -494,15 +494,14 @@ define arm_aapcs_vfpcc <4 x i32> @load_bc4_offset(i16* %i, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vpsel q1, q1, q0
 ; CHECK-BE-NEXT:    vrev64.32 q0, q1
 ; CHECK-BE-NEXT:    bx lr
-  %g = getelementptr inbounds i16, i16* %i, i32 3
-  %gb = bitcast i16* %g to i32*
-  %l = load i32, i32* %gb, align 4
+  %g = getelementptr inbounds i16, ptr %i, i32 3
+  %l = load i32, ptr %g, align 4
   %c = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %l)
   %s = select <4 x i1> %c, <4 x i32> %a, <4 x i32> zeroinitializer
   ret <4 x i32> %s
 }
 
-define arm_aapcs_vfpcc <4 x i32> @load_bc4_range4(i32* %i, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @load_bc4_range4(ptr %i, <4 x i32> %a) {
 ; CHECK-LE-LABEL: load_bc4_range4:
 ; CHECK-LE:       @ %bb.0:
 ; CHECK-LE-NEXT:    vldr p0, [r0, #4]
@@ -518,14 +517,14 @@ define arm_aapcs_vfpcc <4 x i32> @load_bc4_range4(i32* %i, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vpsel q1, q1, q0
 ; CHECK-BE-NEXT:    vrev64.32 q0, q1
 ; CHECK-BE-NEXT:    bx lr
-  %g = getelementptr inbounds i32, i32* %i, i32 1
-  %l = load i32, i32* %g, align 4
+  %g = getelementptr inbounds i32, ptr %i, i32 1
+  %l = load i32, ptr %g, align 4
   %c = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %l)
   %s = select <4 x i1> %c, <4 x i32> %a, <4 x i32> zeroinitializer
   ret <4 x i32> %s
 }
 
-define arm_aapcs_vfpcc <4 x i32> @load_bc4_range(i32* %i, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @load_bc4_range(ptr %i, <4 x i32> %a) {
 ; CHECK-LE-LABEL: load_bc4_range:
 ; CHECK-LE:       @ %bb.0:
 ; CHECK-LE-NEXT:    vldr p0, [r0, #508]
@@ -541,14 +540,14 @@ define arm_aapcs_vfpcc <4 x i32> @load_bc4_range(i32* %i, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vpsel q1, q1, q0
 ; CHECK-BE-NEXT:    vrev64.32 q0, q1
 ; CHECK-BE-NEXT:    bx lr
-  %g = getelementptr inbounds i32, i32* %i, i32 127
-  %l = load i32, i32* %g, align 4
+  %g = getelementptr inbounds i32, ptr %i, i32 127
+  %l = load i32, ptr %g, align 4
   %c = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %l)
   %s = select <4 x i1> %c, <4 x i32> %a, <4 x i32> zeroinitializer
   ret <4 x i32> %s
 }
 
-define arm_aapcs_vfpcc <4 x i32> @load_bc4_range2(i32* %i, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @load_bc4_range2(ptr %i, <4 x i32> %a) {
 ; CHECK-LE-LABEL: load_bc4_range2:
 ; CHECK-LE:       @ %bb.0:
 ; CHECK-LE-NEXT:    vldr p0, [r0, #-508]
@@ -564,14 +563,14 @@ define arm_aapcs_vfpcc <4 x i32> @load_bc4_range2(i32* %i, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vpsel q1, q1, q0
 ; CHECK-BE-NEXT:    vrev64.32 q0, q1
 ; CHECK-BE-NEXT:    bx lr
-  %g = getelementptr inbounds i32, i32* %i, i32 -127
-  %l = load i32, i32* %g, align 4
+  %g = getelementptr inbounds i32, ptr %i, i32 -127
+  %l = load i32, ptr %g, align 4
   %c = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %l)
   %s = select <4 x i1> %c, <4 x i32> %a, <4 x i32> zeroinitializer
   ret <4 x i32> %s
 }
 
-define arm_aapcs_vfpcc <4 x i32> @load_bc4_range3(i32* %i, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @load_bc4_range3(ptr %i, <4 x i32> %a) {
 ; CHECK-LE-LABEL: load_bc4_range3:
 ; CHECK-LE:       @ %bb.0:
 ; CHECK-LE-NEXT:    add.w r0, r0, #512
@@ -589,14 +588,14 @@ define arm_aapcs_vfpcc <4 x i32> @load_bc4_range3(i32* %i, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vpsel q1, q1, q0
 ; CHECK-BE-NEXT:    vrev64.32 q0, q1
 ; CHECK-BE-NEXT:    bx lr
-  %g = getelementptr inbounds i32, i32* %i, i32 128
-  %l = load i32, i32* %g, align 4
+  %g = getelementptr inbounds i32, ptr %i, i32 128
+  %l = load i32, ptr %g, align 4
   %c = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %l)
   %s = select <4 x i1> %c, <4 x i32> %a, <4 x i32> zeroinitializer
   ret <4 x i32> %s
 }
 
-define arm_aapcs_vfpcc <4 x i32> @load_bc4_range5(i32* %i, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x i32> @load_bc4_range5(ptr %i, <4 x i32> %a) {
 ; CHECK-LE-LABEL: load_bc4_range5:
 ; CHECK-LE:       @ %bb.0:
 ; CHECK-LE-NEXT:    sub.w r0, r0, #512
@@ -614,8 +613,8 @@ define arm_aapcs_vfpcc <4 x i32> @load_bc4_range5(i32* %i, <4 x i32> %a) {
 ; CHECK-BE-NEXT:    vpsel q1, q1, q0
 ; CHECK-BE-NEXT:    vrev64.32 q0, q1
 ; CHECK-BE-NEXT:    bx lr
-  %g = getelementptr inbounds i32, i32* %i, i32 -128
-  %l = load i32, i32* %g, align 4
+  %g = getelementptr inbounds i32, ptr %i, i32 -128
+  %l = load i32, ptr %g, align 4
   %c = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %l)
   %s = select <4 x i1> %c, <4 x i32> %a, <4 x i32> zeroinitializer
   ret <4 x i32> %s

diff  --git a/llvm/test/CodeGen/Thumb2/mve-pred-threshold.ll b/llvm/test/CodeGen/Thumb2/mve-pred-threshold.ll
index 43c905a43293e..566b4bce71cd0 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-threshold.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-threshold.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst -verify-machineinstrs %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc void @thres_i32(i32* %data, i16 zeroext %N, i32 %T) {
+define arm_aapcs_vfpcc void @thres_i32(ptr %data, i16 zeroext %N, i32 %T) {
 ; CHECK-LABEL: thres_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -41,23 +41,21 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %data, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = icmp slt <4 x i32> %wide.load, %broadcast.splat18
-  %3 = icmp sgt <4 x i32> %wide.load, %broadcast.splat20
-  %4 = or <4 x i1> %2, %3
-  %5 = bitcast i32* %0 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> zeroinitializer, <4 x i32>* %5, i32 4, <4 x i1> %4)
+  %0 = getelementptr inbounds i32, ptr %data, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = icmp slt <4 x i32> %wide.load, %broadcast.splat18
+  %2 = icmp sgt <4 x i32> %wide.load, %broadcast.splat20
+  %3 = or <4 x i1> %1, %2
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr %0, i32 4, <4 x i1> %3)
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, %mul
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %mul
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @thresh_i16(i16* %data, i16 zeroext %N, i16 signext %T) {
+define arm_aapcs_vfpcc void @thresh_i16(ptr %data, i16 zeroext %N, i16 signext %T) {
 ; CHECK-LABEL: thresh_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -97,23 +95,21 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %data, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = icmp slt <8 x i16> %wide.load, %broadcast.splat25
-  %3 = icmp sgt <8 x i16> %wide.load, %broadcast.splat27
-  %4 = or <8 x i1> %2, %3
-  %5 = bitcast i16* %0 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> zeroinitializer, <8 x i16>* %5, i32 2, <8 x i1> %4)
+  %0 = getelementptr inbounds i16, ptr %data, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = icmp slt <8 x i16> %wide.load, %broadcast.splat25
+  %2 = icmp sgt <8 x i16> %wide.load, %broadcast.splat27
+  %3 = or <8 x i1> %1, %2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> zeroinitializer, ptr %0, i32 2, <8 x i1> %3)
   %index.next = add i32 %index, 8
-  %6 = icmp eq i32 %index.next, %mul
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %mul
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @thresh_i8(i8* %data, i16 zeroext %N, i8 signext %T) {
+define arm_aapcs_vfpcc void @thresh_i8(ptr %data, i16 zeroext %N, i8 signext %T) {
 ; CHECK-LABEL: thresh_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -153,23 +149,21 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %data, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = icmp slt <16 x i8> %wide.load, %broadcast.splat23
-  %3 = icmp sgt <16 x i8> %wide.load, %broadcast.splat25
-  %4 = or <16 x i1> %2, %3
-  %5 = bitcast i8* %0 to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> zeroinitializer, <16 x i8>* %5, i32 1, <16 x i1> %4)
+  %0 = getelementptr inbounds i8, ptr %data, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 1
+  %1 = icmp slt <16 x i8> %wide.load, %broadcast.splat23
+  %2 = icmp sgt <16 x i8> %wide.load, %broadcast.splat25
+  %3 = or <16 x i1> %1, %2
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> zeroinitializer, ptr %0, i32 1, <16 x i1> %3)
   %index.next = add i32 %index, 16
-  %6 = icmp eq i32 %index.next, %mul
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %mul
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @thresh_f32(float* %data, i16 zeroext %N, float %T) {
+define arm_aapcs_vfpcc void @thresh_f32(ptr %data, i16 zeroext %N, float %T) {
 ; CHECK-LABEL: thresh_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -210,23 +204,21 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %data, i32 %index
-  %1 = bitcast float* %0 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %1, align 4
-  %2 = fcmp fast olt <4 x float> %wide.load, %broadcast.splat18
-  %3 = fcmp fast ogt <4 x float> %wide.load, %broadcast.splat20
-  %4 = or <4 x i1> %2, %3
-  %5 = bitcast float* %0 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> zeroinitializer, <4 x float>* %5, i32 4, <4 x i1> %4)
+  %0 = getelementptr inbounds float, ptr %data, i32 %index
+  %wide.load = load <4 x float>, ptr %0, align 4
+  %1 = fcmp fast olt <4 x float> %wide.load, %broadcast.splat18
+  %2 = fcmp fast ogt <4 x float> %wide.load, %broadcast.splat20
+  %3 = or <4 x i1> %1, %2
+  call void @llvm.masked.store.v4f32.p0(<4 x float> zeroinitializer, ptr %0, i32 4, <4 x i1> %3)
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, %mul
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %mul
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @thresh_f16(half* %data, i16 zeroext %N, float %T.coerce) {
+define arm_aapcs_vfpcc void @thresh_f16(ptr %data, i16 zeroext %N, float %T.coerce) {
 ; CHECK-LABEL: thresh_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -271,17 +263,15 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %2 = getelementptr inbounds half, half* %data, i32 %index
-  %3 = bitcast half* %2 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %3, align 2
-  %4 = fcmp fast olt <8 x half> %wide.load, %broadcast.splat20
-  %5 = fcmp fast ogt <8 x half> %wide.load, %broadcast.splat22
-  %6 = or <8 x i1> %4, %5
-  %7 = bitcast half* %2 to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> zeroinitializer, <8 x half>* %7, i32 2, <8 x i1> %6)
+  %2 = getelementptr inbounds half, ptr %data, i32 %index
+  %wide.load = load <8 x half>, ptr %2, align 2
+  %3 = fcmp fast olt <8 x half> %wide.load, %broadcast.splat20
+  %4 = fcmp fast ogt <8 x half> %wide.load, %broadcast.splat22
+  %5 = or <8 x i1> %3, %4
+  call void @llvm.masked.store.v8f16.p0(<8 x half> zeroinitializer, ptr %2, i32 2, <8 x i1> %5)
   %index.next = add i32 %index, 8
-  %8 = icmp eq i32 %index.next, %mul
-  br i1 %8, label %for.cond.cleanup, label %vector.body
+  %6 = icmp eq i32 %index.next, %mul
+  br i1 %6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -289,7 +279,7 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 
 
 
-define arm_aapcs_vfpcc void @thres_rev_i32(i32* %data, i16 zeroext %N, i32 %T) {
+define arm_aapcs_vfpcc void @thres_rev_i32(ptr %data, i16 zeroext %N, i32 %T) {
 ; CHECK-LABEL: thres_rev_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -329,23 +319,21 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %data, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = icmp sgt <4 x i32> %broadcast.splat18, %wide.load
-  %3 = icmp slt <4 x i32> %broadcast.splat20, %wide.load
-  %4 = or <4 x i1> %2, %3
-  %5 = bitcast i32* %0 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> zeroinitializer, <4 x i32>* %5, i32 4, <4 x i1> %4)
+  %0 = getelementptr inbounds i32, ptr %data, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = icmp sgt <4 x i32> %broadcast.splat18, %wide.load
+  %2 = icmp slt <4 x i32> %broadcast.splat20, %wide.load
+  %3 = or <4 x i1> %1, %2
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr %0, i32 4, <4 x i1> %3)
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, %mul
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %mul
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @thresh_rev_i16(i16* %data, i16 zeroext %N, i16 signext %T) {
+define arm_aapcs_vfpcc void @thresh_rev_i16(ptr %data, i16 zeroext %N, i16 signext %T) {
 ; CHECK-LABEL: thresh_rev_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -385,23 +373,21 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %data, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = icmp sgt <8 x i16> %broadcast.splat25, %wide.load
-  %3 = icmp slt <8 x i16> %broadcast.splat27, %wide.load
-  %4 = or <8 x i1> %2, %3
-  %5 = bitcast i16* %0 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> zeroinitializer, <8 x i16>* %5, i32 2, <8 x i1> %4)
+  %0 = getelementptr inbounds i16, ptr %data, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = icmp sgt <8 x i16> %broadcast.splat25, %wide.load
+  %2 = icmp slt <8 x i16> %broadcast.splat27, %wide.load
+  %3 = or <8 x i1> %1, %2
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> zeroinitializer, ptr %0, i32 2, <8 x i1> %3)
   %index.next = add i32 %index, 8
-  %6 = icmp eq i32 %index.next, %mul
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %mul
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @thresh_rev_i8(i8* %data, i16 zeroext %N, i8 signext %T) {
+define arm_aapcs_vfpcc void @thresh_rev_i8(ptr %data, i16 zeroext %N, i8 signext %T) {
 ; CHECK-LABEL: thresh_rev_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -441,23 +427,21 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %data, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = icmp sgt <16 x i8> %broadcast.splat23, %wide.load
-  %3 = icmp slt <16 x i8> %broadcast.splat25, %wide.load
-  %4 = or <16 x i1> %2, %3
-  %5 = bitcast i8* %0 to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> zeroinitializer, <16 x i8>* %5, i32 1, <16 x i1> %4)
+  %0 = getelementptr inbounds i8, ptr %data, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 1
+  %1 = icmp sgt <16 x i8> %broadcast.splat23, %wide.load
+  %2 = icmp slt <16 x i8> %broadcast.splat25, %wide.load
+  %3 = or <16 x i1> %1, %2
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> zeroinitializer, ptr %0, i32 1, <16 x i1> %3)
   %index.next = add i32 %index, 16
-  %6 = icmp eq i32 %index.next, %mul
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %mul
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @thresh_rev_f32(float* %data, i16 zeroext %N, float %T) {
+define arm_aapcs_vfpcc void @thresh_rev_f32(ptr %data, i16 zeroext %N, float %T) {
 ; CHECK-LABEL: thresh_rev_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -498,23 +482,21 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float, float* %data, i32 %index
-  %1 = bitcast float* %0 to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %1, align 4
-  %2 = fcmp fast ogt <4 x float> %broadcast.splat18, %wide.load
-  %3 = fcmp fast olt <4 x float> %broadcast.splat20, %wide.load
-  %4 = or <4 x i1> %2, %3
-  %5 = bitcast float* %0 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> zeroinitializer, <4 x float>* %5, i32 4, <4 x i1> %4)
+  %0 = getelementptr inbounds float, ptr %data, i32 %index
+  %wide.load = load <4 x float>, ptr %0, align 4
+  %1 = fcmp fast ogt <4 x float> %broadcast.splat18, %wide.load
+  %2 = fcmp fast olt <4 x float> %broadcast.splat20, %wide.load
+  %3 = or <4 x i1> %1, %2
+  call void @llvm.masked.store.v4f32.p0(<4 x float> zeroinitializer, ptr %0, i32 4, <4 x i1> %3)
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, %mul
-  br i1 %6, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %mul
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @thresh_rev_f16(half* %data, i16 zeroext %N, float %T.coerce) {
+define arm_aapcs_vfpcc void @thresh_rev_f16(ptr %data, i16 zeroext %N, float %T.coerce) {
 ; CHECK-LABEL: thresh_rev_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -559,17 +541,15 @@ vector.ph:                                        ; preds = %entry
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %2 = getelementptr inbounds half, half* %data, i32 %index
-  %3 = bitcast half* %2 to <8 x half>*
-  %wide.load = load <8 x half>, <8 x half>* %3, align 2
-  %4 = fcmp fast ogt <8 x half> %broadcast.splat20, %wide.load
-  %5 = fcmp fast olt <8 x half> %broadcast.splat22, %wide.load
-  %6 = or <8 x i1> %4, %5
-  %7 = bitcast half* %2 to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> zeroinitializer, <8 x half>* %7, i32 2, <8 x i1> %6)
+  %2 = getelementptr inbounds half, ptr %data, i32 %index
+  %wide.load = load <8 x half>, ptr %2, align 2
+  %3 = fcmp fast ogt <8 x half> %broadcast.splat20, %wide.load
+  %4 = fcmp fast olt <8 x half> %broadcast.splat22, %wide.load
+  %5 = or <8 x i1> %3, %4
+  call void @llvm.masked.store.v8f16.p0(<8 x half> zeroinitializer, ptr %2, i32 2, <8 x i1> %5)
   %index.next = add i32 %index, 8
-  %8 = icmp eq i32 %index.next, %mul
-  br i1 %8, label %for.cond.cleanup, label %vector.body
+  %6 = icmp eq i32 %index.next, %mul
+  br i1 %6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
@@ -578,8 +558,8 @@ for.cond.cleanup:                                 ; preds = %vector.body, %entry
 
 
 
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32 immarg, <16 x i1>)
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>)
-declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32 immarg, <8 x i1>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>)
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32 immarg, <16 x i1>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v8f16.p0(<8 x half>, ptr, i32 immarg, <8 x i1>)

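Every hunk in this file applies the same mechanical rewrite: a typed pointer such as i32* becomes plain ptr, the bitcast feeding each vector load/store becomes dead and is deleted (shifting the unnamed value numbers down), and the overloaded masked load/store intrinsics drop the pointee type from their mangled suffix (.p0v4i32 becomes .p0). A minimal before/after sketch of that pattern, using a hypothetical @example function rather than any of the tests above:

  ; typed pointers (before)
  declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
  declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)

  define void @example(i32* %p, <4 x i1> %m) {
    %c = bitcast i32* %p to <4 x i32>*
    %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %c, i32 4, <4 x i1> %m, <4 x i32> zeroinitializer)
    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v, <4 x i32>* %c, i32 4, <4 x i1> %m)
    ret void
  }

  ; opaque pointers (after): the bitcast is gone and the intrinsic
  ; suffix no longer encodes the pointee type
  declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
  declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)

  define void @example(ptr %p, <4 x i1> %m) {
    %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4, <4 x i1> %m, <4 x i32> zeroinitializer)
    call void @llvm.masked.store.v4i32.p0(<4 x i32> %v, ptr %p, i32 4, <4 x i1> %m)
    ret void
  }
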
diff  --git a/llvm/test/CodeGen/Thumb2/mve-qrintr.ll b/llvm/test/CodeGen/Thumb2/mve-qrintr.ll
index 06b148d10662d..151e51fcf0c93 100644
--- a/llvm/test/CodeGen/Thumb2/mve-qrintr.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-qrintr.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
 
-define void @vaddq(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vaddq(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vaddq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -29,23 +29,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast i32* %x.addr.014 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.014, i32 4
-  %3 = add <4 x i32> %2, %.splat
-  %4 = bitcast i32* %y.addr.013 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.014, i32 4
+  %2 = add <4 x i32> %1, %.splat
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vadd(i32* %s1, i32 %c0, i32 %N) {
+define void @vadd(ptr %s1, i32 %c0, i32 %N) {
 ; CHECK-LABEL: vadd:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -73,14 +71,13 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.013 = phi i32* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.013 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.012 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.012)
-  %1 = bitcast i32* %s1.addr.013 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %3 = tail call <4 x i32> @llvm.arm.mve.add.predicated.v4i32.v4i1(<4 x i32> %2, <4 x i32> %.splat, <4 x i1> %0, <4 x i32> %2)
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %1, i32 4, <4 x i1> %0)
-  %add.ptr = getelementptr inbounds i32, i32* %s1.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %s1.addr.013, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %2 = tail call <4 x i32> @llvm.arm.mve.add.predicated.v4i32.v4i1(<4 x i32> %1, <4 x i32> %.splat, <4 x i1> %0, <4 x i32> %1)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %s1.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds i32, ptr %s1.addr.013, i32 4
   %sub = add nsw i32 %N.addr.012, -4
   %cmp = icmp sgt i32 %N.addr.012, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -89,7 +86,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vsubq(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vsubq(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vsubq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -117,23 +114,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast i32* %x.addr.014 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.014, i32 4
-  %3 = sub <4 x i32> %2, %.splat
-  %4 = bitcast i32* %y.addr.013 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.014, i32 4
+  %2 = sub <4 x i32> %1, %.splat
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vsub(i32* %s1, i32 %c0, i32 %N) {
+define void @vsub(ptr %s1, i32 %c0, i32 %N) {
 ; CHECK-LABEL: vsub:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -161,14 +156,13 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.013 = phi i32* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.013 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.012 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.012)
-  %1 = bitcast i32* %s1.addr.013 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %3 = tail call <4 x i32> @llvm.arm.mve.sub.predicated.v4i32.v4i1(<4 x i32> %2, <4 x i32> %.splat, <4 x i1> %0, <4 x i32> %2)
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %1, i32 4, <4 x i1> %0)
-  %add.ptr = getelementptr inbounds i32, i32* %s1.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %s1.addr.013, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %2 = tail call <4 x i32> @llvm.arm.mve.sub.predicated.v4i32.v4i1(<4 x i32> %1, <4 x i32> %.splat, <4 x i1> %0, <4 x i32> %1)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %s1.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds i32, ptr %s1.addr.013, i32 4
   %sub = add nsw i32 %N.addr.012, -4
   %cmp = icmp sgt i32 %N.addr.012, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -177,7 +171,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vmulq(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vmulq(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vmulq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -205,23 +199,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast i32* %x.addr.014 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.014, i32 4
-  %3 = mul <4 x i32> %2, %.splat
-  %4 = bitcast i32* %y.addr.013 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.014, i32 4
+  %2 = mul <4 x i32> %1, %.splat
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vmul(i32* %s1, i32 %c0, i32 %N) {
+define void @vmul(ptr %s1, i32 %c0, i32 %N) {
 ; CHECK-LABEL: vmul:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -249,14 +241,13 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.013 = phi i32* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.013 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.012 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.012)
-  %1 = bitcast i32* %s1.addr.013 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %3 = tail call <4 x i32> @llvm.arm.mve.mul.predicated.v4i32.v4i1(<4 x i32> %2, <4 x i32> %.splat, <4 x i1> %0, <4 x i32> %2)
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %1, i32 4, <4 x i1> %0)
-  %add.ptr = getelementptr inbounds i32, i32* %s1.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %s1.addr.013, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %2 = tail call <4 x i32> @llvm.arm.mve.mul.predicated.v4i32.v4i1(<4 x i32> %1, <4 x i32> %.splat, <4 x i1> %0, <4 x i32> %1)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %s1.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds i32, ptr %s1.addr.013, i32 4
   %sub = add nsw i32 %N.addr.012, -4
   %cmp = icmp sgt i32 %N.addr.012, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -265,7 +256,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vqaddq(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vqaddq(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vqaddq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -293,23 +284,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast i32* %x.addr.014 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.014, i32 4
-  %3 = tail call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %2, <4 x i32> %.splat)
-  %4 = bitcast i32* %y.addr.013 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.014, i32 4
+  %2 = tail call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %1, <4 x i32> %.splat)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vqaddqu(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vqaddqu(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vqaddqu:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -337,23 +326,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast i32* %x.addr.014 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.014, i32 4
-  %3 = tail call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %2, <4 x i32> %.splat)
-  %4 = bitcast i32* %y.addr.013 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.014, i32 4
+  %2 = tail call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %1, <4 x i32> %.splat)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vqadd(i32* %s1, i32 %c0, i32 %N) {
+define void @vqadd(ptr %s1, i32 %c0, i32 %N) {
 ; CHECK-LABEL: vqadd:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -381,14 +368,13 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.013 = phi i32* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.013 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.012 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.012)
-  %1 = bitcast i32* %s1.addr.013 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %3 = tail call <4 x i32> @llvm.arm.mve.qadd.predicated.v4i32.v4i1(<4 x i32> %2, <4 x i32> %.splat, i32 0, <4 x i1> %0, <4 x i32> %2)
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %1, i32 4, <4 x i1> %0)
-  %add.ptr = getelementptr inbounds i32, i32* %s1.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %s1.addr.013, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %2 = tail call <4 x i32> @llvm.arm.mve.qadd.predicated.v4i32.v4i1(<4 x i32> %1, <4 x i32> %.splat, i32 0, <4 x i1> %0, <4 x i32> %1)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %s1.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds i32, ptr %s1.addr.013, i32 4
   %sub = add nsw i32 %N.addr.012, -4
   %cmp = icmp sgt i32 %N.addr.012, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -397,7 +383,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vqsubq(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vqsubq(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vqsubq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -425,23 +411,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast i32* %x.addr.014 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.014, i32 4
-  %3 = tail call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %2, <4 x i32> %.splat)
-  %4 = bitcast i32* %y.addr.013 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.014, i32 4
+  %2 = tail call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %1, <4 x i32> %.splat)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vqsubqu(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vqsubqu(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vqsubqu:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -469,23 +453,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast i32* %x.addr.014 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.014, i32 4
-  %3 = tail call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %2, <4 x i32> %.splat)
-  %4 = bitcast i32* %y.addr.013 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.014, i32 4
+  %2 = tail call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %1, <4 x i32> %.splat)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vqsub(i32* %s1, i32 %c0, i32 %N) {
+define void @vqsub(ptr %s1, i32 %c0, i32 %N) {
 ; CHECK-LABEL: vqsub:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -513,14 +495,13 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.013 = phi i32* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.013 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.012 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.012)
-  %1 = bitcast i32* %s1.addr.013 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %3 = tail call <4 x i32> @llvm.arm.mve.qsub.predicated.v4i32.v4i1(<4 x i32> %2, <4 x i32> %.splat, i32 0, <4 x i1> %0, <4 x i32> %2)
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %1, i32 4, <4 x i1> %0)
-  %add.ptr = getelementptr inbounds i32, i32* %s1.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %s1.addr.013, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %2 = tail call <4 x i32> @llvm.arm.mve.qsub.predicated.v4i32.v4i1(<4 x i32> %1, <4 x i32> %.splat, i32 0, <4 x i1> %0, <4 x i32> %1)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %s1.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds i32, ptr %s1.addr.013, i32 4
   %sub = add nsw i32 %N.addr.012, -4
   %cmp = icmp sgt i32 %N.addr.012, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -529,7 +510,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vhaddq(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vhaddq(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vhaddq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -557,23 +538,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast i32* %x.addr.014 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.014, i32 4
-  %3 = tail call <4 x i32> @llvm.arm.mve.vhadd.v4i32(<4 x i32> %2, <4 x i32> %.splat, i32 0)
-  %4 = bitcast i32* %y.addr.013 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.014, i32 4
+  %2 = tail call <4 x i32> @llvm.arm.mve.vhadd.v4i32(<4 x i32> %1, <4 x i32> %.splat, i32 0)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vhadd(i32* %s1, i32 %c0, i32 %N) {
+define void @vhadd(ptr %s1, i32 %c0, i32 %N) {
 ; CHECK-LABEL: vhadd:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -601,14 +580,13 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.013 = phi i32* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.013 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.012 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.012)
-  %1 = bitcast i32* %s1.addr.013 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %3 = tail call <4 x i32> @llvm.arm.mve.hadd.predicated.v4i32.v4i1(<4 x i32> %2, <4 x i32> %.splat, i32 0, <4 x i1> %0, <4 x i32> %2)
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %1, i32 4, <4 x i1> %0)
-  %add.ptr = getelementptr inbounds i32, i32* %s1.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %s1.addr.013, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %2 = tail call <4 x i32> @llvm.arm.mve.hadd.predicated.v4i32.v4i1(<4 x i32> %1, <4 x i32> %.splat, i32 0, <4 x i1> %0, <4 x i32> %1)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %s1.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds i32, ptr %s1.addr.013, i32 4
   %sub = add nsw i32 %N.addr.012, -4
   %cmp = icmp sgt i32 %N.addr.012, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -617,7 +595,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vhsubq(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vhsubq(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vhsubq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -645,23 +623,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast i32* %x.addr.014 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.014, i32 4
-  %3 = tail call <4 x i32> @llvm.arm.mve.vhsub.v4i32(<4 x i32> %2, <4 x i32> %.splat, i32 0)
-  %4 = bitcast i32* %y.addr.013 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.014, i32 4
+  %2 = tail call <4 x i32> @llvm.arm.mve.vhsub.v4i32(<4 x i32> %1, <4 x i32> %.splat, i32 0)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vhsub(i32* %s1, i32 %c0, i32 %N) {
+define void @vhsub(ptr %s1, i32 %c0, i32 %N) {
 ; CHECK-LABEL: vhsub:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -689,14 +665,13 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.013 = phi i32* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.013 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.012 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.012)
-  %1 = bitcast i32* %s1.addr.013 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %3 = tail call <4 x i32> @llvm.arm.mve.hsub.predicated.v4i32.v4i1(<4 x i32> %2, <4 x i32> %.splat, i32 0, <4 x i1> %0, <4 x i32> %2)
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %1, i32 4, <4 x i1> %0)
-  %add.ptr = getelementptr inbounds i32, i32* %s1.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %s1.addr.013, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %2 = tail call <4 x i32> @llvm.arm.mve.hsub.predicated.v4i32.v4i1(<4 x i32> %1, <4 x i32> %.splat, i32 0, <4 x i1> %0, <4 x i32> %1)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %s1.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds i32, ptr %s1.addr.013, i32 4
   %sub = add nsw i32 %N.addr.012, -4
   %cmp = icmp sgt i32 %N.addr.012, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -705,7 +680,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vqdmullbq(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vqdmullbq(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vqdmullbq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -733,25 +708,23 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast i32* %x.addr.014 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.014, i32 4
-  %3 = tail call <2 x i64> @llvm.arm.mve.vqdmull.v2i64.v4i32(<4 x i32> %2, <4 x i32> %.splat, i32 0)
-  %4 = bitcast <2 x i64> %3 to <4 x i32>
-  %5 = bitcast i32* %y.addr.013 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %4, <4 x i32>* %5, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.014, i32 4
+  %2 = tail call <2 x i64> @llvm.arm.mve.vqdmull.v2i64.v4i32(<4 x i32> %1, <4 x i32> %.splat, i32 0)
+  %3 = bitcast <2 x i64> %2 to <4 x i32>
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %3, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
 
-define void @vqdmull(i32* %s1, i32 %c0, i32 %N) {
+define void @vqdmull(ptr %s1, i32 %c0, i32 %N) {
 ; CHECK-LABEL: vqdmull:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -780,17 +753,15 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.013 = phi i32* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.013 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.012 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.012)
-  %1 = bitcast i32* %s1.addr.013 to <4 x i16>*
-  %2 = tail call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %1, i32 2, <4 x i1> %0, <4 x i16> zeroinitializer)
-  %3 = sext <4 x i16> %2 to <4 x i32>
-  %4 = bitcast <4 x i32> %3 to <8 x i16>
-  %5 = tail call <4 x i32> @llvm.arm.mve.vqdmull.predicated.v4i32.v8i16.v4i1(<8 x i16> %4, <8 x i16> %.splat, i32 0, <4 x i1> %0, <4 x i32> %3)
-  %6 = bitcast i32* %s1.addr.013 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %5, <4 x i32>* %6, i32 4, <4 x i1> %0)
-  %add.ptr = getelementptr inbounds i32, i32* %s1.addr.013, i32 4
+  %1 = tail call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %s1.addr.013, i32 2, <4 x i1> %0, <4 x i16> zeroinitializer)
+  %2 = sext <4 x i16> %1 to <4 x i32>
+  %3 = bitcast <4 x i32> %2 to <8 x i16>
+  %4 = tail call <4 x i32> @llvm.arm.mve.vqdmull.predicated.v4i32.v8i16.v4i1(<8 x i16> %3, <8 x i16> %.splat, i32 0, <4 x i1> %0, <4 x i32> %2)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %4, ptr %s1.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds i32, ptr %s1.addr.013, i32 4
   %sub = add nsw i32 %N.addr.012, -4
   %cmp = icmp sgt i32 %N.addr.012, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -799,7 +770,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vqdmulhq(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vqdmulhq(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vqdmulhq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -827,23 +798,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast i32* %x.addr.014 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.014, i32 4
-  %3 = tail call <4 x i32> @llvm.arm.mve.vqdmulh.v4i32(<4 x i32> %2, <4 x i32> %.splat)
-  %4 = bitcast i32* %y.addr.013 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.014, i32 4
+  %2 = tail call <4 x i32> @llvm.arm.mve.vqdmulh.v4i32(<4 x i32> %1, <4 x i32> %.splat)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vqdmulh(i32* %s1, i32 %c0, i32 %N) {
+define void @vqdmulh(ptr %s1, i32 %c0, i32 %N) {
 ; CHECK-LABEL: vqdmulh:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -871,14 +840,13 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.013 = phi i32* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.013 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.012 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.012)
-  %1 = bitcast i32* %s1.addr.013 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %3 = tail call <4 x i32> @llvm.arm.mve.qdmulh.predicated.v4i32.v4i1(<4 x i32> %2, <4 x i32> %.splat, <4 x i1> %0, <4 x i32> %2)
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %1, i32 4, <4 x i1> %0)
-  %add.ptr = getelementptr inbounds i32, i32* %s1.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %s1.addr.013, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %2 = tail call <4 x i32> @llvm.arm.mve.qdmulh.predicated.v4i32.v4i1(<4 x i32> %1, <4 x i32> %.splat, <4 x i1> %0, <4 x i32> %1)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %s1.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds i32, ptr %s1.addr.013, i32 4
   %sub = add nsw i32 %N.addr.012, -4
   %cmp = icmp sgt i32 %N.addr.012, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -887,7 +855,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vqrdmulhq(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vqrdmulhq(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vqrdmulhq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -915,23 +883,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast i32* %x.addr.014 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.014, i32 4
-  %3 = tail call <4 x i32> @llvm.arm.mve.vqrdmulh.v4i32(<4 x i32> %2, <4 x i32> %.splat)
-  %4 = bitcast i32* %y.addr.013 to <4 x i32>*
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.014, i32 4
+  %2 = tail call <4 x i32> @llvm.arm.mve.vqrdmulh.v4i32(<4 x i32> %1, <4 x i32> %.splat)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vqrdmulh(i32* %s1, i32 %c0, i32 %N) {
+define void @vqrdmulh(ptr %s1, i32 %c0, i32 %N) {
 ; CHECK-LABEL: vqrdmulh:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -959,14 +925,13 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.013 = phi i32* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.013 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.012 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.012)
-  %1 = bitcast i32* %s1.addr.013 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %3 = tail call <4 x i32> @llvm.arm.mve.qrdmulh.predicated.v4i32.v4i1(<4 x i32> %2, <4 x i32> %.splat, <4 x i1> %0, <4 x i32> %2)
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %1, i32 4, <4 x i1> %0)
-  %add.ptr = getelementptr inbounds i32, i32* %s1.addr.013, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %s1.addr.013, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %2 = tail call <4 x i32> @llvm.arm.mve.qrdmulh.predicated.v4i32.v4i1(<4 x i32> %1, <4 x i32> %.splat, <4 x i1> %0, <4 x i32> %1)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %2, ptr %s1.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds i32, ptr %s1.addr.013, i32 4
   %sub = add nsw i32 %N.addr.012, -4
   %cmp = icmp sgt i32 %N.addr.012, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -975,7 +940,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vmlaq(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vmlaq(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vmlaq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1004,25 +969,23 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.017 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.016 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.017 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.016 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.015 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.015)
-  %1 = bitcast i32* %x.addr.017 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.017, i32 4
-  %3 = bitcast i32* %y.addr.016 to <4 x i32>*
-  %4 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %5 = mul <4 x i32> %4, %.splat
-  %6 = add <4 x i32> %5, %2
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %6, <4 x i32>* %3, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.016, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.017, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.017, i32 4
+  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %y.addr.016, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %3 = mul <4 x i32> %2, %.splat
+  %4 = add <4 x i32> %3, %1
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %4, ptr %y.addr.016, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.016, i32 4
   %sub = add nsw i32 %i.015, -4
   %cmp = icmp sgt i32 %i.015, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vmlaqp(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vmlaqp(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vmlaqp:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1049,24 +1012,22 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.018 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.017 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.018 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.017 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.016 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.016)
-  %1 = bitcast i32* %x.addr.018 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.018, i32 4
-  %3 = bitcast i32* %y.addr.017 to <4 x i32>*
-  %4 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %5 = tail call <4 x i32> @llvm.arm.mve.vmla.n.predicated.v4i32.v4i1(<4 x i32> %2, <4 x i32> %4, i32 %z, <4 x i1> %0)
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %5, <4 x i32>* %3, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.017, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.018, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.018, i32 4
+  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %y.addr.017, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %3 = tail call <4 x i32> @llvm.arm.mve.vmla.n.predicated.v4i32.v4i1(<4 x i32> %1, <4 x i32> %2, i32 %z, <4 x i1> %0)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %3, ptr %y.addr.017, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.017, i32 4
   %sub = add nsw i32 %i.016, -4
   %cmp = icmp sgt i32 %i.016, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vmlasq(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vmlasq(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vmlasq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1095,25 +1056,23 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.017 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.016 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.017 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.016 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.015 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.015)
-  %1 = bitcast i32* %x.addr.017 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.017, i32 4
-  %3 = bitcast i32* %y.addr.016 to <4 x i32>*
-  %4 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %5 = mul <4 x i32> %4, %2
-  %6 = add <4 x i32> %5, %.splat
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %6, <4 x i32>* %3, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.016, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.017, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.017, i32 4
+  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %y.addr.016, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %3 = mul <4 x i32> %2, %1
+  %4 = add <4 x i32> %3, %.splat
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %4, ptr %y.addr.016, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.016, i32 4
   %sub = add nsw i32 %i.015, -4
   %cmp = icmp sgt i32 %i.015, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vmlasqp(i32* %x, i32* %y, i32 %n, i32 %z) {
+define void @vmlasqp(ptr %x, ptr %y, i32 %n, i32 %z) {
 ; CHECK-LABEL: vmlasqp:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1140,24 +1099,22 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.018 = phi i32* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.017 = phi i32* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.018 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.017 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.016 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.016)
-  %1 = bitcast i32* %x.addr.018 to <4 x i32>*
-  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %add.ptr = getelementptr inbounds i32, i32* %x.addr.018, i32 4
-  %3 = bitcast i32* %y.addr.017 to <4 x i32>*
-  %4 = tail call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
-  %5 = tail call <4 x i32> @llvm.arm.mve.vmlas.n.predicated.v4i32.v4i1(<4 x i32> %2, <4 x i32> %4, i32 %z, <4 x i1> %0)
-  tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %5, <4 x i32>* %3, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds i32, i32* %y.addr.017, i32 4
+  %1 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %x.addr.018, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %add.ptr = getelementptr inbounds i32, ptr %x.addr.018, i32 4
+  %2 = tail call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %y.addr.017, i32 4, <4 x i1> %0, <4 x i32> zeroinitializer)
+  %3 = tail call <4 x i32> @llvm.arm.mve.vmlas.n.predicated.v4i32.v4i1(<4 x i32> %1, <4 x i32> %2, i32 %z, <4 x i1> %0)
+  tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %3, ptr %y.addr.017, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds i32, ptr %y.addr.017, i32 4
   %sub = add nsw i32 %i.016, -4
   %cmp = icmp sgt i32 %i.016, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vaddqf(float* %x, float* %y, i32 %n, float %z) {
+define void @vaddqf(ptr %x, ptr %y, i32 %n, float %z) {
 ; CHECK-LABEL: vaddqf:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1185,23 +1142,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi float* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi float* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast float* %x.addr.014 to <4 x float>*
-  %2 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %1, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
-  %add.ptr = getelementptr inbounds float, float* %x.addr.014, i32 4
-  %3 = fadd fast <4 x float> %2, %.splat
-  %4 = bitcast float* %y.addr.013 to <4 x float>*
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %3, <4 x float>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds float, float* %y.addr.013, i32 4
+  %1 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %add.ptr = getelementptr inbounds float, ptr %x.addr.014, i32 4
+  %2 = fadd fast <4 x float> %1, %.splat
+  tail call void @llvm.masked.store.v4f32.p0(<4 x float> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds float, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vaddf(float* %s1, float %c0, i32 %N) {
+define void @vaddf(ptr %s1, float %c0, i32 %N) {
 ; CHECK-LABEL: vaddf:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1229,14 +1184,13 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.013 = phi float* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.013 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.012 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.012)
-  %1 = bitcast float* %s1.addr.013 to <4 x float>*
-  %2 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %1, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
-  %3 = tail call fast <4 x float> @llvm.arm.mve.add.predicated.v4f32.v4i1(<4 x float> %2, <4 x float> %.splat, <4 x i1> %0, <4 x float> %2)
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %3, <4 x float>* %1, i32 4, <4 x i1> %0)
-  %add.ptr = getelementptr inbounds float, float* %s1.addr.013, i32 4
+  %1 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %s1.addr.013, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %2 = tail call fast <4 x float> @llvm.arm.mve.add.predicated.v4f32.v4i1(<4 x float> %1, <4 x float> %.splat, <4 x i1> %0, <4 x float> %1)
+  tail call void @llvm.masked.store.v4f32.p0(<4 x float> %2, ptr %s1.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds float, ptr %s1.addr.013, i32 4
   %sub = add nsw i32 %N.addr.012, -4
   %cmp = icmp sgt i32 %N.addr.012, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -1245,7 +1199,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vsubqf(float* %x, float* %y, i32 %n, float %z) {
+define void @vsubqf(ptr %x, ptr %y, i32 %n, float %z) {
 ; CHECK-LABEL: vsubqf:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1273,23 +1227,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi float* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi float* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast float* %x.addr.014 to <4 x float>*
-  %2 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %1, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
-  %add.ptr = getelementptr inbounds float, float* %x.addr.014, i32 4
-  %3 = fsub fast <4 x float> %2, %.splat
-  %4 = bitcast float* %y.addr.013 to <4 x float>*
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %3, <4 x float>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds float, float* %y.addr.013, i32 4
+  %1 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %add.ptr = getelementptr inbounds float, ptr %x.addr.014, i32 4
+  %2 = fsub fast <4 x float> %1, %.splat
+  tail call void @llvm.masked.store.v4f32.p0(<4 x float> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds float, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vsubf(float* %s1, float %c0, i32 %N) {
+define void @vsubf(ptr %s1, float %c0, i32 %N) {
 ; CHECK-LABEL: vsubf:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1317,14 +1269,13 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.013 = phi float* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.013 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.012 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.012)
-  %1 = bitcast float* %s1.addr.013 to <4 x float>*
-  %2 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %1, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
-  %3 = tail call fast <4 x float> @llvm.arm.mve.sub.predicated.v4f32.v4i1(<4 x float> %2, <4 x float> %.splat, <4 x i1> %0, <4 x float> %2)
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %3, <4 x float>* %1, i32 4, <4 x i1> %0)
-  %add.ptr = getelementptr inbounds float, float* %s1.addr.013, i32 4
+  %1 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %s1.addr.013, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %2 = tail call fast <4 x float> @llvm.arm.mve.sub.predicated.v4f32.v4i1(<4 x float> %1, <4 x float> %.splat, <4 x i1> %0, <4 x float> %1)
+  tail call void @llvm.masked.store.v4f32.p0(<4 x float> %2, ptr %s1.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds float, ptr %s1.addr.013, i32 4
   %sub = add nsw i32 %N.addr.012, -4
   %cmp = icmp sgt i32 %N.addr.012, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -1333,7 +1284,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vmulqf(float* %x, float* %y, i32 %n, float %z) {
+define void @vmulqf(ptr %x, ptr %y, i32 %n, float %z) {
 ; CHECK-LABEL: vmulqf:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1361,23 +1312,21 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.014 = phi float* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.013 = phi float* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.014 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.013 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.012 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.012)
-  %1 = bitcast float* %x.addr.014 to <4 x float>*
-  %2 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %1, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
-  %add.ptr = getelementptr inbounds float, float* %x.addr.014, i32 4
-  %3 = fmul fast <4 x float> %2, %.splat
-  %4 = bitcast float* %y.addr.013 to <4 x float>*
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %3, <4 x float>* %4, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds float, float* %y.addr.013, i32 4
+  %1 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %x.addr.014, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %add.ptr = getelementptr inbounds float, ptr %x.addr.014, i32 4
+  %2 = fmul fast <4 x float> %1, %.splat
+  tail call void @llvm.masked.store.v4f32.p0(<4 x float> %2, ptr %y.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds float, ptr %y.addr.013, i32 4
   %sub = add nsw i32 %i.012, -4
   %cmp = icmp sgt i32 %i.012, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vmulf(float* %s1, float %c0, i32 %N) {
+define void @vmulf(ptr %s1, float %c0, i32 %N) {
 ; CHECK-LABEL: vmulf:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1405,14 +1354,13 @@ while.body.lr.ph:                                 ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.013 = phi float* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.013 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.012 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.012)
-  %1 = bitcast float* %s1.addr.013 to <4 x float>*
-  %2 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %1, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
-  %3 = tail call fast <4 x float> @llvm.arm.mve.mul.predicated.v4f32.v4i1(<4 x float> %2, <4 x float> %.splat, <4 x i1> %0, <4 x float> %2)
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %3, <4 x float>* %1, i32 4, <4 x i1> %0)
-  %add.ptr = getelementptr inbounds float, float* %s1.addr.013, i32 4
+  %1 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %s1.addr.013, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %2 = tail call fast <4 x float> @llvm.arm.mve.mul.predicated.v4f32.v4i1(<4 x float> %1, <4 x float> %.splat, <4 x i1> %0, <4 x float> %1)
+  tail call void @llvm.masked.store.v4f32.p0(<4 x float> %2, ptr %s1.addr.013, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds float, ptr %s1.addr.013, i32 4
   %sub = add nsw i32 %N.addr.012, -4
   %cmp = icmp sgt i32 %N.addr.012, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -1421,7 +1369,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vfmaq(float* %x, float* %y, i32 %n, float %z) {
+define void @vfmaq(ptr %x, ptr %y, i32 %n, float %z) {
 ; CHECK-LABEL: vfmaq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1450,24 +1398,22 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.017 = phi float* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.016 = phi float* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.017 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.016 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.015 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.015)
-  %1 = bitcast float* %x.addr.017 to <4 x float>*
-  %2 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %1, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
-  %add.ptr = getelementptr inbounds float, float* %x.addr.017, i32 4
-  %3 = bitcast float* %y.addr.016 to <4 x float>*
-  %4 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %3, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
-  %5 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %4, <4 x float> %.splat, <4 x float> %2)
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %5, <4 x float>* %3, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds float, float* %y.addr.016, i32 4
+  %1 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %x.addr.017, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %add.ptr = getelementptr inbounds float, ptr %x.addr.017, i32 4
+  %2 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %y.addr.016, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %3 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %2, <4 x float> %.splat, <4 x float> %1)
+  tail call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %y.addr.016, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds float, ptr %y.addr.016, i32 4
   %sub = add nsw i32 %i.015, -4
   %cmp = icmp sgt i32 %i.015, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vfma(float* %s1, float* %s2, float %c0, i32 %N) {
+define void @vfma(ptr %s1, ptr %s2, float %c0, i32 %N) {
 ; CHECK-LABEL: vfma:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1491,21 +1437,19 @@ entry:
   br i1 %cmp12, label %while.body.lr.ph, label %while.end
 
 while.body.lr.ph:                                 ; preds = %entry
-  %0 = bitcast float* %s2 to <4 x float>*
   %.splatinsert = insertelement <4 x float> undef, float %c0, i32 0
   %.splat = shufflevector <4 x float> %.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.014 = phi float* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.014 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.013 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
-  %1 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.013)
-  %2 = bitcast float* %s1.addr.014 to <4 x float>*
-  %3 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> zeroinitializer)
-  %4 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %1, <4 x float> zeroinitializer)
-  %5 = tail call fast <4 x float> @llvm.arm.mve.fma.predicated.v4f32.v4i1(<4 x float> %4, <4 x float> %.splat, <4 x float> %3, <4 x i1> %1)
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %5, <4 x float>* %2, i32 4, <4 x i1> %1)
-  %add.ptr = getelementptr inbounds float, float* %s1.addr.014, i32 4
+  %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.013)
+  %1 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %s1.addr.014, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %2 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %s2, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %3 = tail call fast <4 x float> @llvm.arm.mve.fma.predicated.v4f32.v4i1(<4 x float> %2, <4 x float> %.splat, <4 x float> %1, <4 x i1> %0)
+  tail call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %s1.addr.014, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds float, ptr %s1.addr.014, i32 4
   %sub = add nsw i32 %N.addr.013, -4
   %cmp = icmp sgt i32 %N.addr.013, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -1514,7 +1458,7 @@ while.end:                                        ; preds = %while.body, %entry
   ret void
 }
 
-define void @vfmasq(float* %x, float* %y, i32 %n, float %z) {
+define void @vfmasq(ptr %x, ptr %y, i32 %n, float %z) {
 ; CHECK-LABEL: vfmasq:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1543,24 +1487,22 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
   ret void
 
 for.body:                                         ; preds = %entry, %for.body
-  %x.addr.017 = phi float* [ %add.ptr, %for.body ], [ %x, %entry ]
-  %y.addr.016 = phi float* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %x.addr.017 = phi ptr [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.016 = phi ptr [ %add.ptr1, %for.body ], [ %y, %entry ]
   %i.015 = phi i32 [ %sub, %for.body ], [ %n, %entry ]
   %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %i.015)
-  %1 = bitcast float* %x.addr.017 to <4 x float>*
-  %2 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %1, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
-  %add.ptr = getelementptr inbounds float, float* %x.addr.017, i32 4
-  %3 = bitcast float* %y.addr.016 to <4 x float>*
-  %4 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %3, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
-  %5 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %2, <4 x float> %4, <4 x float> %.splat)
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %5, <4 x float>* %3, i32 4, <4 x i1> %0)
-  %add.ptr1 = getelementptr inbounds float, float* %y.addr.016, i32 4
+  %1 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %x.addr.017, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %add.ptr = getelementptr inbounds float, ptr %x.addr.017, i32 4
+  %2 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %y.addr.016, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %3 = tail call fast <4 x float> @llvm.fma.v4f32(<4 x float> %1, <4 x float> %2, <4 x float> %.splat)
+  tail call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %y.addr.016, i32 4, <4 x i1> %0)
+  %add.ptr1 = getelementptr inbounds float, ptr %y.addr.016, i32 4
   %sub = add nsw i32 %i.015, -4
   %cmp = icmp sgt i32 %i.015, 4
   br i1 %cmp, label %for.body, label %for.cond.cleanup
 }
 
-define void @vfmas(float* %s1, float* %s2, float %c0, i32 %N) {
+define void @vfmas(ptr %s1, ptr %s2, float %c0, i32 %N) {
 ; CHECK-LABEL: vfmas:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -1584,21 +1526,19 @@ entry:
   br i1 %cmp12, label %while.body.lr.ph, label %while.end
 
 while.body.lr.ph:                                 ; preds = %entry
-  %0 = bitcast float* %s2 to <4 x float>*
   %.splatinsert = insertelement <4 x float> undef, float %c0, i32 0
   %.splat = shufflevector <4 x float> %.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
   br label %while.body
 
 while.body:                                       ; preds = %while.body.lr.ph, %while.body
-  %s1.addr.014 = phi float* [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
+  %s1.addr.014 = phi ptr [ %s1, %while.body.lr.ph ], [ %add.ptr, %while.body ]
   %N.addr.013 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
-  %1 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.013)
-  %2 = bitcast float* %s1.addr.014 to <4 x float>*
-  %3 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %2, i32 4, <4 x i1> %1, <4 x float> zeroinitializer)
-  %4 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %1, <4 x float> zeroinitializer)
-  %5 = tail call fast <4 x float> @llvm.arm.mve.fma.predicated.v4f32.v4i1(<4 x float> %3, <4 x float> %4, <4 x float> %.splat, <4 x i1> %1)
-  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %5, <4 x float>* %2, i32 4, <4 x i1> %1)
-  %add.ptr = getelementptr inbounds float, float* %s1.addr.014, i32 4
+  %0 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %N.addr.013)
+  %1 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %s1.addr.014, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %2 = tail call fast <4 x float> @llvm.masked.load.v4f32.p0(ptr %s2, i32 4, <4 x i1> %0, <4 x float> zeroinitializer)
+  %3 = tail call fast <4 x float> @llvm.arm.mve.fma.predicated.v4f32.v4i1(<4 x float> %1, <4 x float> %2, <4 x float> %.splat, <4 x i1> %0)
+  tail call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %s1.addr.014, i32 4, <4 x i1> %0)
+  %add.ptr = getelementptr inbounds float, ptr %s1.addr.014, i32 4
   %sub = add nsw i32 %N.addr.013, -4
   %cmp = icmp sgt i32 %N.addr.013, 4
   br i1 %cmp, label %while.body, label %while.end
@@ -1608,11 +1548,11 @@ while.end:                                        ; preds = %while.body, %entry
 }
 
 declare <4 x i1> @llvm.arm.mve.vctp32(i32)
-declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32 immarg, <4 x i1>, <4 x i16>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)
 
 declare <4 x i32> @llvm.arm.mve.add.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>)
 declare <4 x i32> @llvm.arm.mve.sub.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>)
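
The same mechanical rewrite applies in every test above and below: typed pointer arguments become plain `ptr`, the `bitcast` instructions feeding vector loads and stores disappear, and the pointer-type suffix in each masked load/store intrinsic name collapses to the address space alone. A minimal sketch of the pattern, with an illustrative function name not taken from any of these tests:

; Before (typed pointers): a bitcast is needed, and the intrinsic name
; mangles the full pointer type.
define void @example(i32* %p, <4 x i1> %mask) {
  %vp = bitcast i32* %p to <4 x i32>*
  %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %vp, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v, <4 x i32>* %vp, i32 4, <4 x i1> %mask)
  ret void
}
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)

; After (opaque pointers): the bitcast disappears, and only the address
; space remains in the intrinsic mangling.
define void @example(ptr %p, <4 x i1> %mask) {
  %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
  call void @llvm.masked.store.v4i32.p0(<4 x i32> %v, ptr %p, i32 4, <4 x i1> %mask)
  ret void
}
declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)

Because each removed `bitcast` was an unnamed instruction, the implicit value numbering of everything after it shifts down by one, which accounts for the wholesale renumbering of `%1`, `%2`, ... in the hunks throughout this diff.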

diff --git a/llvm/test/CodeGen/Thumb2/mve-satmul-loops.ll b/llvm/test/CodeGen/Thumb2/mve-satmul-loops.ll
index 09d1204d421eb..86319b01f4820 100644
--- a/llvm/test/CodeGen/Thumb2/mve-satmul-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-satmul-loops.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
 
-define arm_aapcs_vfpcc void @ssatmul_s_q31(i32* nocapture readonly %pSrcA, i32* nocapture readonly %pSrcB, i32* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_s_q31(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_s_q31:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -118,34 +118,31 @@ entry:
 
 vector.ph:                                        ; preds = %entry
   %n.vec = and i32 %N, -2
-  %ind.end = getelementptr i32, i32* %pSrcA, i32 %n.vec
-  %ind.end15 = getelementptr i32, i32* %pSrcB, i32 %n.vec
-  %ind.end17 = getelementptr i32, i32* %pDst, i32 %n.vec
+  %ind.end = getelementptr i32, ptr %pSrcA, i32 %n.vec
+  %ind.end15 = getelementptr i32, ptr %pSrcB, i32 %n.vec
+  %ind.end17 = getelementptr i32, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i32, i32* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i32, i32* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i32, i32* %pDst, i32 %index
-  %0 = bitcast i32* %next.gep to <2 x i32>*
-  %wide.load = load <2 x i32>, <2 x i32>* %0, align 4
-  %1 = sext <2 x i32> %wide.load to <2 x i64>
-  %2 = bitcast i32* %next.gep18 to <2 x i32>*
-  %wide.load20 = load <2 x i32>, <2 x i32>* %2, align 4
-  %3 = sext <2 x i32> %wide.load20 to <2 x i64>
-  %4 = mul nsw <2 x i64> %3, %1
-  %5 = ashr <2 x i64> %4, <i64 31, i64 31>
-  %6 = icmp sgt <2 x i64> %5, <i64 -2147483648, i64 -2147483648>
-  %7 = select <2 x i1> %6, <2 x i64> %5, <2 x i64> <i64 -2147483648, i64 -2147483648>
-  %8 = icmp slt <2 x i64> %7, <i64 2147483647, i64 2147483647>
-  %9 = select <2 x i1> %8, <2 x i64> %7, <2 x i64> <i64 2147483647, i64 2147483647>
-  %10 = trunc <2 x i64> %9 to <2 x i32>
-  %11 = bitcast i32* %next.gep19 to <2 x i32>*
-  store <2 x i32> %10, <2 x i32>* %11, align 4
+  %next.gep = getelementptr i32, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i32, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i32, ptr %pDst, i32 %index
+  %wide.load = load <2 x i32>, ptr %next.gep, align 4
+  %0 = sext <2 x i32> %wide.load to <2 x i64>
+  %wide.load20 = load <2 x i32>, ptr %next.gep18, align 4
+  %1 = sext <2 x i32> %wide.load20 to <2 x i64>
+  %2 = mul nsw <2 x i64> %1, %0
+  %3 = ashr <2 x i64> %2, <i64 31, i64 31>
+  %4 = icmp sgt <2 x i64> %3, <i64 -2147483648, i64 -2147483648>
+  %5 = select <2 x i1> %4, <2 x i64> %3, <2 x i64> <i64 -2147483648, i64 -2147483648>
+  %6 = icmp slt <2 x i64> %5, <i64 2147483647, i64 2147483647>
+  %7 = select <2 x i1> %6, <2 x i64> %5, <2 x i64> <i64 2147483647, i64 2147483647>
+  %8 = trunc <2 x i64> %7 to <2 x i32>
+  store <2 x i32> %8, ptr %next.gep19, align 4
   %index.next = add i32 %index, 2
-  %12 = icmp eq i32 %index.next, %n.vec
-  br i1 %12, label %middle.block, label %vector.body
+  %9 = icmp eq i32 %index.next, %n.vec
+  br i1 %9, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -153,9 +150,9 @@ middle.block:                                     ; preds = %vector.body
 
 for.body.preheader:                               ; preds = %entry, %middle.block
   %i.012.ph = phi i32 [ 0, %entry ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.011.ph = phi i32* [ %pSrcA, %entry ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.010.ph = phi i32* [ %pSrcB, %entry ], [ %ind.end15, %middle.block ]
-  %pDst.addr.09.ph = phi i32* [ %pDst, %entry ], [ %ind.end17, %middle.block ]
+  %pSrcA.addr.011.ph = phi ptr [ %pSrcA, %entry ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.010.ph = phi ptr [ %pSrcB, %entry ], [ %ind.end15, %middle.block ]
+  %pDst.addr.09.ph = phi ptr [ %pDst, %entry ], [ %ind.end17, %middle.block ]
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
@@ -163,30 +160,30 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader, %for.body
   %i.012 = phi i32 [ %inc, %for.body ], [ %i.012.ph, %for.body.preheader ]
-  %pSrcA.addr.011 = phi i32* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader ]
-  %pSrcB.addr.010 = phi i32* [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader ]
-  %pDst.addr.09 = phi i32* [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader ]
-  %incdec.ptr = getelementptr inbounds i32, i32* %pSrcA.addr.011, i32 1
-  %13 = load i32, i32* %pSrcA.addr.011, align 4
-  %conv = sext i32 %13 to i64
-  %incdec.ptr1 = getelementptr inbounds i32, i32* %pSrcB.addr.010, i32 1
-  %14 = load i32, i32* %pSrcB.addr.010, align 4
-  %conv2 = sext i32 %14 to i64
+  %pSrcA.addr.011 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader ]
+  %pSrcB.addr.010 = phi ptr [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader ]
+  %pDst.addr.09 = phi ptr [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader ]
+  %incdec.ptr = getelementptr inbounds i32, ptr %pSrcA.addr.011, i32 1
+  %10 = load i32, ptr %pSrcA.addr.011, align 4
+  %conv = sext i32 %10 to i64
+  %incdec.ptr1 = getelementptr inbounds i32, ptr %pSrcB.addr.010, i32 1
+  %11 = load i32, ptr %pSrcB.addr.010, align 4
+  %conv2 = sext i32 %11 to i64
   %mul = mul nsw i64 %conv2, %conv
   %shr = ashr i64 %mul, 31
-  %15 = icmp sgt i64 %shr, -2147483648
-  %.val.i = select i1 %15, i64 %shr, i64 -2147483648
-  %16 = icmp slt i64 %.val.i, 2147483647
-  %retval.0.i = select i1 %16, i64 %.val.i, i64 2147483647
+  %12 = icmp sgt i64 %shr, -2147483648
+  %.val.i = select i1 %12, i64 %shr, i64 -2147483648
+  %13 = icmp slt i64 %.val.i, 2147483647
+  %retval.0.i = select i1 %13, i64 %.val.i, i64 2147483647
   %conv3 = trunc i64 %retval.0.i to i32
-  %incdec.ptr4 = getelementptr inbounds i32, i32* %pDst.addr.09, i32 1
-  store i32 %conv3, i32* %pDst.addr.09, align 4
+  %incdec.ptr4 = getelementptr inbounds i32, ptr %pDst.addr.09, i32 1
+  store i32 %conv3, ptr %pDst.addr.09, align 4
   %inc = add nuw i32 %i.012, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @ssatmul_4_q31(i32* nocapture readonly %pSrcA, i32* nocapture readonly %pSrcB, i32* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_4_q31(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_4_q31:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -361,41 +358,38 @@ for.body.preheader:                               ; preds = %entry
 
 for.body.preheader21:                             ; preds = %middle.block, %for.body.preheader
   %i.012.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.011.ph = phi i32* [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.010.ph = phi i32* [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
-  %pDst.addr.09.ph = phi i32* [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
+  %pSrcA.addr.011.ph = phi ptr [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.010.ph = phi ptr [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
+  %pDst.addr.09.ph = phi ptr [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
   br label %for.body
 
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i32 %N, -4
-  %ind.end = getelementptr i32, i32* %pSrcA, i32 %n.vec
-  %ind.end15 = getelementptr i32, i32* %pSrcB, i32 %n.vec
-  %ind.end17 = getelementptr i32, i32* %pDst, i32 %n.vec
+  %ind.end = getelementptr i32, ptr %pSrcA, i32 %n.vec
+  %ind.end15 = getelementptr i32, ptr %pSrcB, i32 %n.vec
+  %ind.end17 = getelementptr i32, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i32, i32* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i32, i32* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i32, i32* %pDst, i32 %index
-  %0 = bitcast i32* %next.gep to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %0, align 4
-  %1 = sext <4 x i32> %wide.load to <4 x i64>
-  %2 = bitcast i32* %next.gep18 to <4 x i32>*
-  %wide.load20 = load <4 x i32>, <4 x i32>* %2, align 4
-  %3 = sext <4 x i32> %wide.load20 to <4 x i64>
-  %4 = mul nsw <4 x i64> %3, %1
-  %5 = ashr <4 x i64> %4, <i64 31, i64 31, i64 31, i64 31>
-  %6 = icmp sgt <4 x i64> %5, <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
-  %7 = select <4 x i1> %6, <4 x i64> %5, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
-  %8 = icmp slt <4 x i64> %7, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
-  %9 = select <4 x i1> %8, <4 x i64> %7, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
-  %10 = trunc <4 x i64> %9 to <4 x i32>
-  %11 = bitcast i32* %next.gep19 to <4 x i32>*
-  store <4 x i32> %10, <4 x i32>* %11, align 4
+  %next.gep = getelementptr i32, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i32, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i32, ptr %pDst, i32 %index
+  %wide.load = load <4 x i32>, ptr %next.gep, align 4
+  %0 = sext <4 x i32> %wide.load to <4 x i64>
+  %wide.load20 = load <4 x i32>, ptr %next.gep18, align 4
+  %1 = sext <4 x i32> %wide.load20 to <4 x i64>
+  %2 = mul nsw <4 x i64> %1, %0
+  %3 = ashr <4 x i64> %2, <i64 31, i64 31, i64 31, i64 31>
+  %4 = icmp sgt <4 x i64> %3, <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
+  %5 = select <4 x i1> %4, <4 x i64> %3, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
+  %6 = icmp slt <4 x i64> %5, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+  %7 = select <4 x i1> %6, <4 x i64> %5, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+  %8 = trunc <4 x i64> %7 to <4 x i32>
+  store <4 x i32> %8, ptr %next.gep19, align 4
   %index.next = add i32 %index, 4
-  %12 = icmp eq i32 %index.next, %n.vec
-  br i1 %12, label %middle.block, label %vector.body
+  %9 = icmp eq i32 %index.next, %n.vec
+  br i1 %9, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -406,30 +400,30 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader21, %for.body
   %i.012 = phi i32 [ %inc, %for.body ], [ %i.012.ph, %for.body.preheader21 ]
-  %pSrcA.addr.011 = phi i32* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
-  %pSrcB.addr.010 = phi i32* [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
-  %pDst.addr.09 = phi i32* [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
-  %incdec.ptr = getelementptr inbounds i32, i32* %pSrcA.addr.011, i32 1
-  %13 = load i32, i32* %pSrcA.addr.011, align 4
-  %conv = sext i32 %13 to i64
-  %incdec.ptr1 = getelementptr inbounds i32, i32* %pSrcB.addr.010, i32 1
-  %14 = load i32, i32* %pSrcB.addr.010, align 4
-  %conv2 = sext i32 %14 to i64
+  %pSrcA.addr.011 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
+  %pSrcB.addr.010 = phi ptr [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
+  %pDst.addr.09 = phi ptr [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
+  %incdec.ptr = getelementptr inbounds i32, ptr %pSrcA.addr.011, i32 1
+  %10 = load i32, ptr %pSrcA.addr.011, align 4
+  %conv = sext i32 %10 to i64
+  %incdec.ptr1 = getelementptr inbounds i32, ptr %pSrcB.addr.010, i32 1
+  %11 = load i32, ptr %pSrcB.addr.010, align 4
+  %conv2 = sext i32 %11 to i64
   %mul = mul nsw i64 %conv2, %conv
   %shr = ashr i64 %mul, 31
-  %15 = icmp sgt i64 %shr, -2147483648
-  %.val.i = select i1 %15, i64 %shr, i64 -2147483648
-  %16 = icmp slt i64 %.val.i, 2147483647
-  %retval.0.i = select i1 %16, i64 %.val.i, i64 2147483647
+  %12 = icmp sgt i64 %shr, -2147483648
+  %.val.i = select i1 %12, i64 %shr, i64 -2147483648
+  %13 = icmp slt i64 %.val.i, 2147483647
+  %retval.0.i = select i1 %13, i64 %.val.i, i64 2147483647
   %conv3 = trunc i64 %retval.0.i to i32
-  %incdec.ptr4 = getelementptr inbounds i32, i32* %pDst.addr.09, i32 1
-  store i32 %conv3, i32* %pDst.addr.09, align 4
+  %incdec.ptr4 = getelementptr inbounds i32, ptr %pDst.addr.09, i32 1
+  store i32 %conv3, ptr %pDst.addr.09, align 4
   %inc = add nuw i32 %i.012, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @ssatmul_4t_q31(i32* nocapture readonly %pSrcA, i32* nocapture readonly %pSrcB, i32* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_4t_q31(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_4t_q31:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
@@ -585,34 +579,31 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = or <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %next.gep = getelementptr i32, i32* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i32, i32* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i32, i32* %pDst, i32 %index
+  %next.gep = getelementptr i32, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i32, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i32, ptr %pDst, i32 %index
   %0 = icmp ule <4 x i32> %induction, %broadcast.splat21
-  %1 = bitcast i32* %next.gep to <4 x i32>*
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %0, <4 x i32> undef)
-  %2 = sext <4 x i32> %wide.masked.load to <4 x i64>
-  %3 = bitcast i32* %next.gep18 to <4 x i32>*
-  %wide.masked.load22 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %0, <4 x i32> undef)
-  %4 = sext <4 x i32> %wide.masked.load22 to <4 x i64>
-  %5 = mul nsw <4 x i64> %4, %2
-  %6 = ashr <4 x i64> %5, <i64 31, i64 31, i64 31, i64 31>
-  %7 = icmp sgt <4 x i64> %6, <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
-  %8 = select <4 x i1> %7, <4 x i64> %6, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
-  %9 = icmp slt <4 x i64> %8, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
-  %10 = select <4 x i1> %9, <4 x i64> %8, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
-  %11 = trunc <4 x i64> %10 to <4 x i32>
-  %12 = bitcast i32* %next.gep19 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %11, <4 x i32>* %12, i32 4, <4 x i1> %0)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %next.gep, i32 4, <4 x i1> %0, <4 x i32> undef)
+  %1 = sext <4 x i32> %wide.masked.load to <4 x i64>
+  %wide.masked.load22 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %next.gep18, i32 4, <4 x i1> %0, <4 x i32> undef)
+  %2 = sext <4 x i32> %wide.masked.load22 to <4 x i64>
+  %3 = mul nsw <4 x i64> %2, %1
+  %4 = ashr <4 x i64> %3, <i64 31, i64 31, i64 31, i64 31>
+  %5 = icmp sgt <4 x i64> %4, <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
+  %6 = select <4 x i1> %5, <4 x i64> %4, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
+  %7 = icmp slt <4 x i64> %6, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+  %8 = select <4 x i1> %7, <4 x i64> %6, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+  %9 = trunc <4 x i64> %8 to <4 x i32>
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %9, ptr %next.gep19, i32 4, <4 x i1> %0)
   %index.next = add i32 %index, 4
-  %13 = icmp eq i32 %index.next, %n.vec
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, %n.vec
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @usatmul_2_q31(i32* nocapture readonly %pSrcA, i32* nocapture readonly %pSrcB, i32* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @usatmul_2_q31(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: usatmul_2_q31:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -693,32 +684,29 @@ entry:
 
 vector.ph:                                        ; preds = %entry
   %n.vec = and i32 %N, -2
-  %ind.end = getelementptr i32, i32* %pSrcA, i32 %n.vec
-  %ind.end15 = getelementptr i32, i32* %pSrcB, i32 %n.vec
-  %ind.end17 = getelementptr i32, i32* %pDst, i32 %n.vec
+  %ind.end = getelementptr i32, ptr %pSrcA, i32 %n.vec
+  %ind.end15 = getelementptr i32, ptr %pSrcB, i32 %n.vec
+  %ind.end17 = getelementptr i32, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i32, i32* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i32, i32* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i32, i32* %pDst, i32 %index
-  %0 = bitcast i32* %next.gep to <2 x i32>*
-  %wide.load = load <2 x i32>, <2 x i32>* %0, align 4
-  %1 = zext <2 x i32> %wide.load to <2 x i64>
-  %2 = bitcast i32* %next.gep18 to <2 x i32>*
-  %wide.load20 = load <2 x i32>, <2 x i32>* %2, align 4
-  %3 = zext <2 x i32> %wide.load20 to <2 x i64>
-  %4 = mul nuw <2 x i64> %3, %1
-  %5 = lshr <2 x i64> %4, <i64 31, i64 31>
-  %6 = icmp ult <2 x i64> %5, <i64 4294967295, i64 4294967295>
-  %7 = select <2 x i1> %6, <2 x i64> %5, <2 x i64> <i64 4294967295, i64 4294967295>
-  %8 = trunc <2 x i64> %7 to <2 x i32>
-  %9 = bitcast i32* %next.gep19 to <2 x i32>*
-  store <2 x i32> %8, <2 x i32>* %9, align 4
+  %next.gep = getelementptr i32, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i32, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i32, ptr %pDst, i32 %index
+  %wide.load = load <2 x i32>, ptr %next.gep, align 4
+  %0 = zext <2 x i32> %wide.load to <2 x i64>
+  %wide.load20 = load <2 x i32>, ptr %next.gep18, align 4
+  %1 = zext <2 x i32> %wide.load20 to <2 x i64>
+  %2 = mul nuw <2 x i64> %1, %0
+  %3 = lshr <2 x i64> %2, <i64 31, i64 31>
+  %4 = icmp ult <2 x i64> %3, <i64 4294967295, i64 4294967295>
+  %5 = select <2 x i1> %4, <2 x i64> %3, <2 x i64> <i64 4294967295, i64 4294967295>
+  %6 = trunc <2 x i64> %5 to <2 x i32>
+  store <2 x i32> %6, ptr %next.gep19, align 4
   %index.next = add i32 %index, 2
-  %10 = icmp eq i32 %index.next, %n.vec
-  br i1 %10, label %middle.block, label %vector.body
+  %7 = icmp eq i32 %index.next, %n.vec
+  br i1 %7, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -726,9 +714,9 @@ middle.block:                                     ; preds = %vector.body
 
 for.body.preheader:                               ; preds = %entry, %middle.block
   %i.012.ph = phi i32 [ 0, %entry ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.011.ph = phi i32* [ %pSrcA, %entry ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.010.ph = phi i32* [ %pSrcB, %entry ], [ %ind.end15, %middle.block ]
-  %pDst.addr.09.ph = phi i32* [ %pDst, %entry ], [ %ind.end17, %middle.block ]
+  %pSrcA.addr.011.ph = phi ptr [ %pSrcA, %entry ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.010.ph = phi ptr [ %pSrcB, %entry ], [ %ind.end15, %middle.block ]
+  %pDst.addr.09.ph = phi ptr [ %pDst, %entry ], [ %ind.end17, %middle.block ]
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
@@ -736,28 +724,28 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader, %for.body
   %i.012 = phi i32 [ %inc, %for.body ], [ %i.012.ph, %for.body.preheader ]
-  %pSrcA.addr.011 = phi i32* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader ]
-  %pSrcB.addr.010 = phi i32* [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader ]
-  %pDst.addr.09 = phi i32* [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader ]
-  %incdec.ptr = getelementptr inbounds i32, i32* %pSrcA.addr.011, i32 1
-  %11 = load i32, i32* %pSrcA.addr.011, align 4
-  %conv = zext i32 %11 to i64
-  %incdec.ptr1 = getelementptr inbounds i32, i32* %pSrcB.addr.010, i32 1
-  %12 = load i32, i32* %pSrcB.addr.010, align 4
-  %conv2 = zext i32 %12 to i64
+  %pSrcA.addr.011 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader ]
+  %pSrcB.addr.010 = phi ptr [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader ]
+  %pDst.addr.09 = phi ptr [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader ]
+  %incdec.ptr = getelementptr inbounds i32, ptr %pSrcA.addr.011, i32 1
+  %8 = load i32, ptr %pSrcA.addr.011, align 4
+  %conv = zext i32 %8 to i64
+  %incdec.ptr1 = getelementptr inbounds i32, ptr %pSrcB.addr.010, i32 1
+  %9 = load i32, ptr %pSrcB.addr.010, align 4
+  %conv2 = zext i32 %9 to i64
   %mul = mul nuw i64 %conv2, %conv
   %shr = lshr i64 %mul, 31
-  %13 = icmp ult i64 %shr, 4294967295
-  %retval.0.i = select i1 %13, i64 %shr, i64 4294967295
+  %10 = icmp ult i64 %shr, 4294967295
+  %retval.0.i = select i1 %10, i64 %shr, i64 4294967295
   %conv3 = trunc i64 %retval.0.i to i32
-  %incdec.ptr4 = getelementptr inbounds i32, i32* %pDst.addr.09, i32 1
-  store i32 %conv3, i32* %pDst.addr.09, align 4
+  %incdec.ptr4 = getelementptr inbounds i32, ptr %pDst.addr.09, i32 1
+  store i32 %conv3, ptr %pDst.addr.09, align 4
   %inc = add nuw i32 %i.012, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @usatmul_4_q31(i32* nocapture readonly %pSrcA, i32* nocapture readonly %pSrcB, i32* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @usatmul_4_q31(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: usatmul_4_q31:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
@@ -866,39 +854,36 @@ for.body.preheader:                               ; preds = %entry
 
 for.body.preheader21:                             ; preds = %middle.block, %for.body.preheader
   %i.012.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.011.ph = phi i32* [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.010.ph = phi i32* [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
-  %pDst.addr.09.ph = phi i32* [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
+  %pSrcA.addr.011.ph = phi ptr [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.010.ph = phi ptr [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
+  %pDst.addr.09.ph = phi ptr [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
   br label %for.body
 
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i32 %N, -4
-  %ind.end = getelementptr i32, i32* %pSrcA, i32 %n.vec
-  %ind.end15 = getelementptr i32, i32* %pSrcB, i32 %n.vec
-  %ind.end17 = getelementptr i32, i32* %pDst, i32 %n.vec
+  %ind.end = getelementptr i32, ptr %pSrcA, i32 %n.vec
+  %ind.end15 = getelementptr i32, ptr %pSrcB, i32 %n.vec
+  %ind.end17 = getelementptr i32, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i32, i32* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i32, i32* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i32, i32* %pDst, i32 %index
-  %0 = bitcast i32* %next.gep to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %0, align 4
-  %1 = zext <4 x i32> %wide.load to <4 x i64>
-  %2 = bitcast i32* %next.gep18 to <4 x i32>*
-  %wide.load20 = load <4 x i32>, <4 x i32>* %2, align 4
-  %3 = zext <4 x i32> %wide.load20 to <4 x i64>
-  %4 = mul nuw <4 x i64> %3, %1
-  %5 = lshr <4 x i64> %4, <i64 31, i64 31, i64 31, i64 31>
-  %6 = icmp ult <4 x i64> %5, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
-  %7 = select <4 x i1> %6, <4 x i64> %5, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
-  %8 = trunc <4 x i64> %7 to <4 x i32>
-  %9 = bitcast i32* %next.gep19 to <4 x i32>*
-  store <4 x i32> %8, <4 x i32>* %9, align 4
+  %next.gep = getelementptr i32, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i32, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i32, ptr %pDst, i32 %index
+  %wide.load = load <4 x i32>, ptr %next.gep, align 4
+  %0 = zext <4 x i32> %wide.load to <4 x i64>
+  %wide.load20 = load <4 x i32>, ptr %next.gep18, align 4
+  %1 = zext <4 x i32> %wide.load20 to <4 x i64>
+  %2 = mul nuw <4 x i64> %1, %0
+  %3 = lshr <4 x i64> %2, <i64 31, i64 31, i64 31, i64 31>
+  %4 = icmp ult <4 x i64> %3, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  %5 = select <4 x i1> %4, <4 x i64> %3, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  %6 = trunc <4 x i64> %5 to <4 x i32>
+  store <4 x i32> %6, ptr %next.gep19, align 4
   %index.next = add i32 %index, 4
-  %10 = icmp eq i32 %index.next, %n.vec
-  br i1 %10, label %middle.block, label %vector.body
+  %7 = icmp eq i32 %index.next, %n.vec
+  br i1 %7, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -909,22 +894,22 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader21, %for.body
   %i.012 = phi i32 [ %inc, %for.body ], [ %i.012.ph, %for.body.preheader21 ]
-  %pSrcA.addr.011 = phi i32* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
-  %pSrcB.addr.010 = phi i32* [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
-  %pDst.addr.09 = phi i32* [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
-  %incdec.ptr = getelementptr inbounds i32, i32* %pSrcA.addr.011, i32 1
-  %11 = load i32, i32* %pSrcA.addr.011, align 4
-  %conv = zext i32 %11 to i64
-  %incdec.ptr1 = getelementptr inbounds i32, i32* %pSrcB.addr.010, i32 1
-  %12 = load i32, i32* %pSrcB.addr.010, align 4
-  %conv2 = zext i32 %12 to i64
+  %pSrcA.addr.011 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
+  %pSrcB.addr.010 = phi ptr [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
+  %pDst.addr.09 = phi ptr [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
+  %incdec.ptr = getelementptr inbounds i32, ptr %pSrcA.addr.011, i32 1
+  %8 = load i32, ptr %pSrcA.addr.011, align 4
+  %conv = zext i32 %8 to i64
+  %incdec.ptr1 = getelementptr inbounds i32, ptr %pSrcB.addr.010, i32 1
+  %9 = load i32, ptr %pSrcB.addr.010, align 4
+  %conv2 = zext i32 %9 to i64
   %mul = mul nuw i64 %conv2, %conv
   %shr = lshr i64 %mul, 31
-  %13 = icmp ult i64 %shr, 4294967295
-  %retval.0.i = select i1 %13, i64 %shr, i64 4294967295
+  %10 = icmp ult i64 %shr, 4294967295
+  %retval.0.i = select i1 %10, i64 %shr, i64 4294967295
   %conv3 = trunc i64 %retval.0.i to i32
-  %incdec.ptr4 = getelementptr inbounds i32, i32* %pDst.addr.09, i32 1
-  store i32 %conv3, i32* %pDst.addr.09, align 4
+  %incdec.ptr4 = getelementptr inbounds i32, ptr %pDst.addr.09, i32 1
+  store i32 %conv3, ptr %pDst.addr.09, align 4
   %inc = add nuw i32 %i.012, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
@@ -933,7 +918,7 @@ for.body:                                         ; preds = %for.body.preheader2
 
 ; i16
 
-define arm_aapcs_vfpcc void @ssatmul_4_q15(i16* nocapture readonly %pSrcA, i16* nocapture readonly %pSrcB, i16* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_4_q15(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_4_q15:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -990,41 +975,38 @@ for.body.preheader:                               ; preds = %entry
 
 for.body.preheader21:                             ; preds = %middle.block, %for.body.preheader
   %i.012.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.011.ph = phi i16* [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.010.ph = phi i16* [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
-  %pDst.addr.09.ph = phi i16* [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
+  %pSrcA.addr.011.ph = phi ptr [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.010.ph = phi ptr [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
+  %pDst.addr.09.ph = phi ptr [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
   br label %for.body
 
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i32 %N, -4
-  %ind.end = getelementptr i16, i16* %pSrcA, i32 %n.vec
-  %ind.end15 = getelementptr i16, i16* %pSrcB, i32 %n.vec
-  %ind.end17 = getelementptr i16, i16* %pDst, i32 %n.vec
+  %ind.end = getelementptr i16, ptr %pSrcA, i32 %n.vec
+  %ind.end15 = getelementptr i16, ptr %pSrcB, i32 %n.vec
+  %ind.end17 = getelementptr i16, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i16, i16* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i16, i16* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i16, i16* %pDst, i32 %index
-  %0 = bitcast i16* %next.gep to <4 x i16>*
-  %wide.load = load <4 x i16>, <4 x i16>* %0, align 2
-  %1 = sext <4 x i16> %wide.load to <4 x i32>
-  %2 = bitcast i16* %next.gep18 to <4 x i16>*
-  %wide.load20 = load <4 x i16>, <4 x i16>* %2, align 2
-  %3 = sext <4 x i16> %wide.load20 to <4 x i32>
-  %4 = mul nsw <4 x i32> %3, %1
-  %5 = ashr <4 x i32> %4, <i32 15, i32 15, i32 15, i32 15>
-  %6 = icmp sgt <4 x i32> %5, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %7 = select <4 x i1> %6, <4 x i32> %5, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %8 = icmp slt <4 x i32> %7, <i32 32767, i32 32767, i32 32767, i32 32767>
-  %9 = select <4 x i1> %8, <4 x i32> %7, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
-  %10 = trunc <4 x i32> %9 to <4 x i16>
-  %11 = bitcast i16* %next.gep19 to <4 x i16>*
-  store <4 x i16> %10, <4 x i16>* %11, align 2
+  %next.gep = getelementptr i16, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i16, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i16, ptr %pDst, i32 %index
+  %wide.load = load <4 x i16>, ptr %next.gep, align 2
+  %0 = sext <4 x i16> %wide.load to <4 x i32>
+  %wide.load20 = load <4 x i16>, ptr %next.gep18, align 2
+  %1 = sext <4 x i16> %wide.load20 to <4 x i32>
+  %2 = mul nsw <4 x i32> %1, %0
+  %3 = ashr <4 x i32> %2, <i32 15, i32 15, i32 15, i32 15>
+  %4 = icmp sgt <4 x i32> %3, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %5 = select <4 x i1> %4, <4 x i32> %3, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %6 = icmp slt <4 x i32> %5, <i32 32767, i32 32767, i32 32767, i32 32767>
+  %7 = select <4 x i1> %6, <4 x i32> %5, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+  %8 = trunc <4 x i32> %7 to <4 x i16>
+  store <4 x i16> %8, ptr %next.gep19, align 2
   %index.next = add i32 %index, 4
-  %12 = icmp eq i32 %index.next, %n.vec
-  br i1 %12, label %middle.block, label %vector.body
+  %9 = icmp eq i32 %index.next, %n.vec
+  br i1 %9, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -1035,30 +1017,30 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader21, %for.body
   %i.012 = phi i32 [ %inc, %for.body ], [ %i.012.ph, %for.body.preheader21 ]
-  %pSrcA.addr.011 = phi i16* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
-  %pSrcB.addr.010 = phi i16* [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
-  %pDst.addr.09 = phi i16* [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
-  %incdec.ptr = getelementptr inbounds i16, i16* %pSrcA.addr.011, i32 1
-  %13 = load i16, i16* %pSrcA.addr.011, align 2
-  %conv = sext i16 %13 to i32
-  %incdec.ptr1 = getelementptr inbounds i16, i16* %pSrcB.addr.010, i32 1
-  %14 = load i16, i16* %pSrcB.addr.010, align 2
-  %conv2 = sext i16 %14 to i32
+  %pSrcA.addr.011 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
+  %pSrcB.addr.010 = phi ptr [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
+  %pDst.addr.09 = phi ptr [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
+  %incdec.ptr = getelementptr inbounds i16, ptr %pSrcA.addr.011, i32 1
+  %10 = load i16, ptr %pSrcA.addr.011, align 2
+  %conv = sext i16 %10 to i32
+  %incdec.ptr1 = getelementptr inbounds i16, ptr %pSrcB.addr.010, i32 1
+  %11 = load i16, ptr %pSrcB.addr.010, align 2
+  %conv2 = sext i16 %11 to i32
   %mul = mul nsw i32 %conv2, %conv
   %shr = ashr i32 %mul, 15
-  %15 = icmp sgt i32 %shr, -32768
-  %.val.i = select i1 %15, i32 %shr, i32 -32768
-  %16 = icmp slt i32 %.val.i, 32767
-  %retval.0.i = select i1 %16, i32 %.val.i, i32 32767
+  %12 = icmp sgt i32 %shr, -32768
+  %.val.i = select i1 %12, i32 %shr, i32 -32768
+  %13 = icmp slt i32 %.val.i, 32767
+  %retval.0.i = select i1 %13, i32 %.val.i, i32 32767
   %conv3 = trunc i32 %retval.0.i to i16
-  %incdec.ptr4 = getelementptr inbounds i16, i16* %pDst.addr.09, i32 1
-  store i16 %conv3, i16* %pDst.addr.09, align 2
+  %incdec.ptr4 = getelementptr inbounds i16, ptr %pDst.addr.09, i32 1
+  store i16 %conv3, ptr %pDst.addr.09, align 2
   %inc = add nuw i32 %i.012, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @ssatmul_8_q15(i16* nocapture readonly %pSrcA, i16* nocapture readonly %pSrcB, i16* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_8_q15(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_8_q15:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -1117,41 +1099,38 @@ for.body.preheader:                               ; preds = %entry
 
 for.body.preheader21:                             ; preds = %middle.block, %for.body.preheader
   %i.012.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.011.ph = phi i16* [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.010.ph = phi i16* [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
-  %pDst.addr.09.ph = phi i16* [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
+  %pSrcA.addr.011.ph = phi ptr [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.010.ph = phi ptr [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
+  %pDst.addr.09.ph = phi ptr [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
   br label %for.body
 
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i32 %N, -8
-  %ind.end = getelementptr i16, i16* %pSrcA, i32 %n.vec
-  %ind.end15 = getelementptr i16, i16* %pSrcB, i32 %n.vec
-  %ind.end17 = getelementptr i16, i16* %pDst, i32 %n.vec
+  %ind.end = getelementptr i16, ptr %pSrcA, i32 %n.vec
+  %ind.end15 = getelementptr i16, ptr %pSrcB, i32 %n.vec
+  %ind.end17 = getelementptr i16, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i16, i16* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i16, i16* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i16, i16* %pDst, i32 %index
-  %0 = bitcast i16* %next.gep to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %0, align 2
-  %1 = sext <8 x i16> %wide.load to <8 x i32>
-  %2 = bitcast i16* %next.gep18 to <8 x i16>*
-  %wide.load20 = load <8 x i16>, <8 x i16>* %2, align 2
-  %3 = sext <8 x i16> %wide.load20 to <8 x i32>
-  %4 = mul nsw <8 x i32> %3, %1
-  %5 = ashr <8 x i32> %4, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
-  %6 = icmp sgt <8 x i32> %5, <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %7 = select <8 x i1> %6, <8 x i32> %5, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %8 = icmp slt <8 x i32> %7, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
-  %9 = select <8 x i1> %8, <8 x i32> %7, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
-  %10 = trunc <8 x i32> %9 to <8 x i16>
-  %11 = bitcast i16* %next.gep19 to <8 x i16>*
-  store <8 x i16> %10, <8 x i16>* %11, align 2
+  %next.gep = getelementptr i16, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i16, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i16, ptr %pDst, i32 %index
+  %wide.load = load <8 x i16>, ptr %next.gep, align 2
+  %0 = sext <8 x i16> %wide.load to <8 x i32>
+  %wide.load20 = load <8 x i16>, ptr %next.gep18, align 2
+  %1 = sext <8 x i16> %wide.load20 to <8 x i32>
+  %2 = mul nsw <8 x i32> %1, %0
+  %3 = ashr <8 x i32> %2, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %4 = icmp sgt <8 x i32> %3, <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %5 = select <8 x i1> %4, <8 x i32> %3, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %6 = icmp slt <8 x i32> %5, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+  %7 = select <8 x i1> %6, <8 x i32> %5, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+  %8 = trunc <8 x i32> %7 to <8 x i16>
+  store <8 x i16> %8, ptr %next.gep19, align 2
   %index.next = add i32 %index, 8
-  %12 = icmp eq i32 %index.next, %n.vec
-  br i1 %12, label %middle.block, label %vector.body
+  %9 = icmp eq i32 %index.next, %n.vec
+  br i1 %9, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -1162,30 +1141,30 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader21, %for.body
   %i.012 = phi i32 [ %inc, %for.body ], [ %i.012.ph, %for.body.preheader21 ]
-  %pSrcA.addr.011 = phi i16* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
-  %pSrcB.addr.010 = phi i16* [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
-  %pDst.addr.09 = phi i16* [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
-  %incdec.ptr = getelementptr inbounds i16, i16* %pSrcA.addr.011, i32 1
-  %13 = load i16, i16* %pSrcA.addr.011, align 2
-  %conv = sext i16 %13 to i32
-  %incdec.ptr1 = getelementptr inbounds i16, i16* %pSrcB.addr.010, i32 1
-  %14 = load i16, i16* %pSrcB.addr.010, align 2
-  %conv2 = sext i16 %14 to i32
+  %pSrcA.addr.011 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
+  %pSrcB.addr.010 = phi ptr [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
+  %pDst.addr.09 = phi ptr [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
+  %incdec.ptr = getelementptr inbounds i16, ptr %pSrcA.addr.011, i32 1
+  %10 = load i16, ptr %pSrcA.addr.011, align 2
+  %conv = sext i16 %10 to i32
+  %incdec.ptr1 = getelementptr inbounds i16, ptr %pSrcB.addr.010, i32 1
+  %11 = load i16, ptr %pSrcB.addr.010, align 2
+  %conv2 = sext i16 %11 to i32
   %mul = mul nsw i32 %conv2, %conv
   %shr = ashr i32 %mul, 15
-  %15 = icmp sgt i32 %shr, -32768
-  %.val.i = select i1 %15, i32 %shr, i32 -32768
-  %16 = icmp slt i32 %.val.i, 32767
-  %retval.0.i = select i1 %16, i32 %.val.i, i32 32767
+  %12 = icmp sgt i32 %shr, -32768
+  %.val.i = select i1 %12, i32 %shr, i32 -32768
+  %13 = icmp slt i32 %.val.i, 32767
+  %retval.0.i = select i1 %13, i32 %.val.i, i32 32767
   %conv3 = trunc i32 %retval.0.i to i16
-  %incdec.ptr4 = getelementptr inbounds i16, i16* %pDst.addr.09, i32 1
-  store i16 %conv3, i16* %pDst.addr.09, align 2
+  %incdec.ptr4 = getelementptr inbounds i16, ptr %pDst.addr.09, i32 1
+  store i16 %conv3, ptr %pDst.addr.09, align 2
   %inc = add nuw i32 %i.012, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @ssatmul_8i_q15(i16* nocapture readonly %pSrcA, i16* nocapture readonly %pSrcB, i16* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_8i_q15(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_8i_q15:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -1244,54 +1223,51 @@ for.body.preheader:                               ; preds = %entry
 
 for.body.preheader21:                             ; preds = %middle.block, %for.body.preheader
   %i.012.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.011.ph = phi i16* [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.010.ph = phi i16* [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
-  %pDst.addr.09.ph = phi i16* [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
+  %pSrcA.addr.011.ph = phi ptr [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.010.ph = phi ptr [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
+  %pDst.addr.09.ph = phi ptr [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
   br label %for.body
 
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i32 %N, -8
-  %ind.end = getelementptr i16, i16* %pSrcA, i32 %n.vec
-  %ind.end15 = getelementptr i16, i16* %pSrcB, i32 %n.vec
-  %ind.end17 = getelementptr i16, i16* %pDst, i32 %n.vec
+  %ind.end = getelementptr i16, ptr %pSrcA, i32 %n.vec
+  %ind.end15 = getelementptr i16, ptr %pSrcB, i32 %n.vec
+  %ind.end17 = getelementptr i16, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i16, i16* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i16, i16* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i16, i16* %pDst, i32 %index
-  %0 = bitcast i16* %next.gep to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %0, align 2
-  %1 = shufflevector <8 x i16> %wide.load, <8 x i16> %wide.load, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %2 = shufflevector <8 x i16> %wide.load, <8 x i16> %wide.load, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %next.gep = getelementptr i16, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i16, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i16, ptr %pDst, i32 %index
+  %wide.load = load <8 x i16>, ptr %next.gep, align 2
+  %0 = shufflevector <8 x i16> %wide.load, <8 x i16> %wide.load, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %1 = shufflevector <8 x i16> %wide.load, <8 x i16> %wide.load, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %2 = sext <4 x i16> %0 to <4 x i32>
   %3 = sext <4 x i16> %1 to <4 x i32>
-  %4 = sext <4 x i16> %2 to <4 x i32>
-  %5 = bitcast i16* %next.gep18 to <8 x i16>*
-  %wide.load20 = load <8 x i16>, <8 x i16>* %5, align 2
-  %6 = shufflevector <8 x i16> %wide.load20, <8 x i16> %wide.load20, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %7 = shufflevector <8 x i16> %wide.load20, <8 x i16> %wide.load20, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-  %8 = sext <4 x i16> %6 to <4 x i32>
-  %9 = sext <4 x i16> %7 to <4 x i32>
-  %10 = mul <4 x i32> %8, %3
-  %11 = mul <4 x i32> %9, %4
-  %12 = ashr <4 x i32> %10, <i32 15, i32 15, i32 15, i32 15>
-  %13 = ashr <4 x i32> %11, <i32 15, i32 15, i32 15, i32 15>
-  %14 = icmp sgt <4 x i32> %12, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %15 = icmp sgt <4 x i32> %13, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %16 = select <4 x i1> %14, <4 x i32> %12, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %17 = select <4 x i1> %15, <4 x i32> %13, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %18 = icmp slt <4 x i32> %16, <i32 32767, i32 32767, i32 32767, i32 32767>
-  %19 = icmp slt <4 x i32> %17, <i32 32767, i32 32767, i32 32767, i32 32767>
-  %20 = select <4 x i1> %18, <4 x i32> %16, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
-  %21 = select <4 x i1> %19, <4 x i32> %17, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
-  %22 = shufflevector <4 x i32> %20, <4 x i32> %21, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  %23 = trunc <8 x i32> %22 to <8 x i16>
-  %24 = bitcast i16* %next.gep19 to <8 x i16>*
-  store <8 x i16> %23, <8 x i16>* %24, align 2
+  %wide.load20 = load <8 x i16>, ptr %next.gep18, align 2
+  %4 = shufflevector <8 x i16> %wide.load20, <8 x i16> %wide.load20, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %5 = shufflevector <8 x i16> %wide.load20, <8 x i16> %wide.load20, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %6 = sext <4 x i16> %4 to <4 x i32>
+  %7 = sext <4 x i16> %5 to <4 x i32>
+  %8 = mul <4 x i32> %6, %2
+  %9 = mul <4 x i32> %7, %3
+  %10 = ashr <4 x i32> %8, <i32 15, i32 15, i32 15, i32 15>
+  %11 = ashr <4 x i32> %9, <i32 15, i32 15, i32 15, i32 15>
+  %12 = icmp sgt <4 x i32> %10, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %13 = icmp sgt <4 x i32> %11, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %14 = select <4 x i1> %12, <4 x i32> %10, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %15 = select <4 x i1> %13, <4 x i32> %11, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %16 = icmp slt <4 x i32> %14, <i32 32767, i32 32767, i32 32767, i32 32767>
+  %17 = icmp slt <4 x i32> %15, <i32 32767, i32 32767, i32 32767, i32 32767>
+  %18 = select <4 x i1> %16, <4 x i32> %14, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+  %19 = select <4 x i1> %17, <4 x i32> %15, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+  %20 = shufflevector <4 x i32> %18, <4 x i32> %19, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  %21 = trunc <8 x i32> %20 to <8 x i16>
+  store <8 x i16> %21, ptr %next.gep19, align 2
   %index.next = add i32 %index, 8
-  %25 = icmp eq i32 %index.next, %n.vec
-  br i1 %25, label %middle.block, label %vector.body
+  %22 = icmp eq i32 %index.next, %n.vec
+  br i1 %22, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -1302,30 +1278,30 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body, %for.body.preheader21
   %i.012 = phi i32 [ %inc, %for.body ], [ %i.012.ph, %for.body.preheader21 ]
-  %pSrcA.addr.011 = phi i16* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
-  %pSrcB.addr.010 = phi i16* [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
-  %pDst.addr.09 = phi i16* [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
-  %incdec.ptr = getelementptr inbounds i16, i16* %pSrcA.addr.011, i32 1
-  %26 = load i16, i16* %pSrcA.addr.011, align 2
-  %conv = sext i16 %26 to i32
-  %incdec.ptr1 = getelementptr inbounds i16, i16* %pSrcB.addr.010, i32 1
-  %27 = load i16, i16* %pSrcB.addr.010, align 2
-  %conv2 = sext i16 %27 to i32
+  %pSrcA.addr.011 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
+  %pSrcB.addr.010 = phi ptr [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
+  %pDst.addr.09 = phi ptr [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
+  %incdec.ptr = getelementptr inbounds i16, ptr %pSrcA.addr.011, i32 1
+  %23 = load i16, ptr %pSrcA.addr.011, align 2
+  %conv = sext i16 %23 to i32
+  %incdec.ptr1 = getelementptr inbounds i16, ptr %pSrcB.addr.010, i32 1
+  %24 = load i16, ptr %pSrcB.addr.010, align 2
+  %conv2 = sext i16 %24 to i32
   %mul = mul nsw i32 %conv2, %conv
   %shr = ashr i32 %mul, 15
-  %28 = icmp sgt i32 %shr, -32768
-  %.val.i = select i1 %28, i32 %shr, i32 -32768
-  %29 = icmp slt i32 %.val.i, 32767
-  %retval.0.i = select i1 %29, i32 %.val.i, i32 32767
+  %25 = icmp sgt i32 %shr, -32768
+  %.val.i = select i1 %25, i32 %shr, i32 -32768
+  %26 = icmp slt i32 %.val.i, 32767
+  %retval.0.i = select i1 %26, i32 %.val.i, i32 32767
   %conv3 = trunc i32 %retval.0.i to i16
-  %incdec.ptr4 = getelementptr inbounds i16, i16* %pDst.addr.09, i32 1
-  store i16 %conv3, i16* %pDst.addr.09, align 2
+  %incdec.ptr4 = getelementptr inbounds i16, ptr %pDst.addr.09, i32 1
+  store i16 %conv3, ptr %pDst.addr.09, align 2
   %inc = add nuw i32 %i.012, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @ssatmul_s4t_q15(i16* nocapture readonly %pSrcA, i16* nocapture readonly %pSrcB, i16* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_s4t_q15(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_s4t_q15:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -1383,34 +1359,31 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
   %induction = or <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
-  %next.gep = getelementptr i16, i16* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i16, i16* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i16, i16* %pDst, i32 %index
+  %next.gep = getelementptr i16, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i16, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i16, ptr %pDst, i32 %index
   %0 = icmp ule <4 x i32> %induction, %broadcast.splat21
-  %1 = bitcast i16* %next.gep to <4 x i16>*
-  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %1, i32 2, <4 x i1> %0, <4 x i16> undef)
-  %2 = sext <4 x i16> %wide.masked.load to <4 x i32>
-  %3 = bitcast i16* %next.gep18 to <4 x i16>*
-  %wide.masked.load22 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %3, i32 2, <4 x i1> %0, <4 x i16> undef)
-  %4 = sext <4 x i16> %wide.masked.load22 to <4 x i32>
-  %5 = mul nsw <4 x i32> %4, %2
-  %6 = ashr <4 x i32> %5, <i32 15, i32 15, i32 15, i32 15>
-  %7 = icmp sgt <4 x i32> %6, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %8 = select <4 x i1> %7, <4 x i32> %6, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %9 = icmp slt <4 x i32> %8, <i32 32767, i32 32767, i32 32767, i32 32767>
-  %10 = select <4 x i1> %9, <4 x i32> %8, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
-  %11 = trunc <4 x i32> %10 to <4 x i16>
-  %12 = bitcast i16* %next.gep19 to <4 x i16>*
-  call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> %11, <4 x i16>* %12, i32 2, <4 x i1> %0)
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %next.gep, i32 2, <4 x i1> %0, <4 x i16> undef)
+  %1 = sext <4 x i16> %wide.masked.load to <4 x i32>
+  %wide.masked.load22 = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %next.gep18, i32 2, <4 x i1> %0, <4 x i16> undef)
+  %2 = sext <4 x i16> %wide.masked.load22 to <4 x i32>
+  %3 = mul nsw <4 x i32> %2, %1
+  %4 = ashr <4 x i32> %3, <i32 15, i32 15, i32 15, i32 15>
+  %5 = icmp sgt <4 x i32> %4, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %6 = select <4 x i1> %5, <4 x i32> %4, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %7 = icmp slt <4 x i32> %6, <i32 32767, i32 32767, i32 32767, i32 32767>
+  %8 = select <4 x i1> %7, <4 x i32> %6, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+  %9 = trunc <4 x i32> %8 to <4 x i16>
+  call void @llvm.masked.store.v4i16.p0(<4 x i16> %9, ptr %next.gep19, i32 2, <4 x i1> %0)
   %index.next = add i32 %index, 4
-  %13 = icmp eq i32 %index.next, %n.vec
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, %n.vec
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @ssatmul_8t_q15(i16* nocapture readonly %pSrcA, i16* nocapture readonly %pSrcB, i16* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_8t_q15(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_8t_q15:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -1498,34 +1471,31 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <8 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <8 x i32> %broadcast.splatinsert, <8 x i32> undef, <8 x i32> zeroinitializer
   %induction = or <8 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  %next.gep = getelementptr i16, i16* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i16, i16* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i16, i16* %pDst, i32 %index
+  %next.gep = getelementptr i16, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i16, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i16, ptr %pDst, i32 %index
   %0 = icmp ule <8 x i32> %induction, %broadcast.splat21
-  %1 = bitcast i16* %next.gep to <8 x i16>*
-  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %0, <8 x i16> undef)
-  %2 = sext <8 x i16> %wide.masked.load to <8 x i32>
-  %3 = bitcast i16* %next.gep18 to <8 x i16>*
-  %wide.masked.load22 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %3, i32 2, <8 x i1> %0, <8 x i16> undef)
-  %4 = sext <8 x i16> %wide.masked.load22 to <8 x i32>
-  %5 = mul nsw <8 x i32> %4, %2
-  %6 = ashr <8 x i32> %5, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
-  %7 = icmp sgt <8 x i32> %6, <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %8 = select <8 x i1> %7, <8 x i32> %6, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %9 = icmp slt <8 x i32> %8, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
-  %10 = select <8 x i1> %9, <8 x i32> %8, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
-  %11 = trunc <8 x i32> %10 to <8 x i16>
-  %12 = bitcast i16* %next.gep19 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %11, <8 x i16>* %12, i32 2, <8 x i1> %0)
+  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %next.gep, i32 2, <8 x i1> %0, <8 x i16> undef)
+  %1 = sext <8 x i16> %wide.masked.load to <8 x i32>
+  %wide.masked.load22 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %next.gep18, i32 2, <8 x i1> %0, <8 x i16> undef)
+  %2 = sext <8 x i16> %wide.masked.load22 to <8 x i32>
+  %3 = mul nsw <8 x i32> %2, %1
+  %4 = ashr <8 x i32> %3, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %5 = icmp sgt <8 x i32> %4, <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %6 = select <8 x i1> %5, <8 x i32> %4, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %7 = icmp slt <8 x i32> %6, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+  %8 = select <8 x i1> %7, <8 x i32> %6, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+  %9 = trunc <8 x i32> %8 to <8 x i16>
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %9, ptr %next.gep19, i32 2, <8 x i1> %0)
   %index.next = add i32 %index, 8
-  %13 = icmp eq i32 %index.next, %n.vec
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, %n.vec
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @ssatmul_8ti_q15(i16* nocapture readonly %pSrcA, i16* nocapture readonly %pSrcB, i16* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_8ti_q15(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_8ti_q15:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -1613,47 +1583,44 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <8 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <8 x i32> %broadcast.splatinsert, <8 x i32> undef, <8 x i32> zeroinitializer
   %induction = or <8 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  %next.gep = getelementptr i16, i16* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i16, i16* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i16, i16* %pDst, i32 %index
+  %next.gep = getelementptr i16, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i16, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i16, ptr %pDst, i32 %index
   %0 = icmp ule <8 x i32> %induction, %broadcast.splat21
-  %1 = bitcast i16* %next.gep to <8 x i16>*
-  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %0, <8 x i16> undef)
-  %2 = shufflevector <8 x i16> %wide.masked.load, <8 x i16> %wide.masked.load, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %3 = shufflevector <8 x i16> %wide.masked.load, <8 x i16> %wide.masked.load, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %next.gep, i32 2, <8 x i1> %0, <8 x i16> undef)
+  %1 = shufflevector <8 x i16> %wide.masked.load, <8 x i16> %wide.masked.load, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %2 = shufflevector <8 x i16> %wide.masked.load, <8 x i16> %wide.masked.load, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %3 = sext <4 x i16> %1 to <4 x i32>
   %4 = sext <4 x i16> %2 to <4 x i32>
-  %5 = sext <4 x i16> %3 to <4 x i32>
-  %6 = bitcast i16* %next.gep18 to <8 x i16>*
-  %wide.masked.load22 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %6, i32 2, <8 x i1> %0, <8 x i16> undef)
-  %7 = shufflevector <8 x i16> %wide.masked.load22, <8 x i16> %wide.masked.load22, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %8 = shufflevector <8 x i16> %wide.masked.load22, <8 x i16> %wide.masked.load22, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-  %9 = sext <4 x i16> %7 to <4 x i32>
-  %10 = sext <4 x i16> %8 to <4 x i32>
-  %11 = mul <4 x i32> %9, %4
-  %12 = mul <4 x i32> %10, %5
-  %13 = ashr <4 x i32> %11, <i32 15, i32 15, i32 15, i32 15>
-  %14 = ashr <4 x i32> %12, <i32 15, i32 15, i32 15, i32 15>
-  %15 = icmp sgt <4 x i32> %13, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %16 = icmp sgt <4 x i32> %14, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %17 = select <4 x i1> %15, <4 x i32> %13, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %18 = select <4 x i1> %16, <4 x i32> %14, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
-  %19 = icmp slt <4 x i32> %17, <i32 32767, i32 32767, i32 32767, i32 32767>
-  %20 = icmp slt <4 x i32> %18, <i32 32767, i32 32767, i32 32767, i32 32767>
-  %21 = select <4 x i1> %19, <4 x i32> %17, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
-  %22 = select <4 x i1> %20, <4 x i32> %18, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
-  %23 = shufflevector <4 x i32> %21, <4 x i32> %22, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  %24 = trunc <8 x i32> %23 to <8 x i16>
-  %25 = bitcast i16* %next.gep19 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %24, <8 x i16>* %25, i32 2, <8 x i1> %0)
+  %wide.masked.load22 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %next.gep18, i32 2, <8 x i1> %0, <8 x i16> undef)
+  %5 = shufflevector <8 x i16> %wide.masked.load22, <8 x i16> %wide.masked.load22, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %6 = shufflevector <8 x i16> %wide.masked.load22, <8 x i16> %wide.masked.load22, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %7 = sext <4 x i16> %5 to <4 x i32>
+  %8 = sext <4 x i16> %6 to <4 x i32>
+  %9 = mul <4 x i32> %7, %3
+  %10 = mul <4 x i32> %8, %4
+  %11 = ashr <4 x i32> %9, <i32 15, i32 15, i32 15, i32 15>
+  %12 = ashr <4 x i32> %10, <i32 15, i32 15, i32 15, i32 15>
+  %13 = icmp sgt <4 x i32> %11, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %14 = icmp sgt <4 x i32> %12, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %15 = select <4 x i1> %13, <4 x i32> %11, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %16 = select <4 x i1> %14, <4 x i32> %12, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+  %17 = icmp slt <4 x i32> %15, <i32 32767, i32 32767, i32 32767, i32 32767>
+  %18 = icmp slt <4 x i32> %16, <i32 32767, i32 32767, i32 32767, i32 32767>
+  %19 = select <4 x i1> %17, <4 x i32> %15, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+  %20 = select <4 x i1> %18, <4 x i32> %16, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+  %21 = shufflevector <4 x i32> %19, <4 x i32> %20, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  %22 = trunc <8 x i32> %21 to <8 x i16>
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %22, ptr %next.gep19, i32 2, <8 x i1> %0)
   %index.next = add i32 %index, 8
-  %26 = icmp eq i32 %index.next, %n.vec
-  br i1 %26, label %for.cond.cleanup, label %vector.body
+  %23 = icmp eq i32 %index.next, %n.vec
+  br i1 %23, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @usatmul_4_q15(i16* nocapture readonly %pSrcA, i16* nocapture readonly %pSrcB, i16* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @usatmul_4_q15(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: usatmul_4_q15:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -1716,39 +1683,36 @@ for.body.preheader:                               ; preds = %entry
 
 for.body.preheader21:                             ; preds = %middle.block, %for.body.preheader
   %i.012.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.011.ph = phi i16* [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.010.ph = phi i16* [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
-  %pDst.addr.09.ph = phi i16* [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
+  %pSrcA.addr.011.ph = phi ptr [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.010.ph = phi ptr [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
+  %pDst.addr.09.ph = phi ptr [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
   br label %for.body
 
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i32 %N, -4
-  %ind.end = getelementptr i16, i16* %pSrcA, i32 %n.vec
-  %ind.end15 = getelementptr i16, i16* %pSrcB, i32 %n.vec
-  %ind.end17 = getelementptr i16, i16* %pDst, i32 %n.vec
+  %ind.end = getelementptr i16, ptr %pSrcA, i32 %n.vec
+  %ind.end15 = getelementptr i16, ptr %pSrcB, i32 %n.vec
+  %ind.end17 = getelementptr i16, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i16, i16* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i16, i16* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i16, i16* %pDst, i32 %index
-  %0 = bitcast i16* %next.gep to <4 x i16>*
-  %wide.load = load <4 x i16>, <4 x i16>* %0, align 2
-  %1 = zext <4 x i16> %wide.load to <4 x i32>
-  %2 = bitcast i16* %next.gep18 to <4 x i16>*
-  %wide.load20 = load <4 x i16>, <4 x i16>* %2, align 2
-  %3 = zext <4 x i16> %wide.load20 to <4 x i32>
-  %4 = mul nuw <4 x i32> %3, %1
-  %5 = lshr <4 x i32> %4, <i32 15, i32 15, i32 15, i32 15>
-  %6 = icmp ult <4 x i32> %5, <i32 65535, i32 65535, i32 65535, i32 65535>
-  %7 = select <4 x i1> %6, <4 x i32> %5, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>
-  %8 = trunc <4 x i32> %7 to <4 x i16>
-  %9 = bitcast i16* %next.gep19 to <4 x i16>*
-  store <4 x i16> %8, <4 x i16>* %9, align 2
+  %next.gep = getelementptr i16, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i16, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i16, ptr %pDst, i32 %index
+  %wide.load = load <4 x i16>, ptr %next.gep, align 2
+  %0 = zext <4 x i16> %wide.load to <4 x i32>
+  %wide.load20 = load <4 x i16>, ptr %next.gep18, align 2
+  %1 = zext <4 x i16> %wide.load20 to <4 x i32>
+  %2 = mul nuw <4 x i32> %1, %0
+  %3 = lshr <4 x i32> %2, <i32 15, i32 15, i32 15, i32 15>
+  %4 = icmp ult <4 x i32> %3, <i32 65535, i32 65535, i32 65535, i32 65535>
+  %5 = select <4 x i1> %4, <4 x i32> %3, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>
+  %6 = trunc <4 x i32> %5 to <4 x i16>
+  store <4 x i16> %6, ptr %next.gep19, align 2
   %index.next = add i32 %index, 4
-  %10 = icmp eq i32 %index.next, %n.vec
-  br i1 %10, label %middle.block, label %vector.body
+  %7 = icmp eq i32 %index.next, %n.vec
+  br i1 %7, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -1759,28 +1723,28 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader21, %for.body
   %i.012 = phi i32 [ %inc, %for.body ], [ %i.012.ph, %for.body.preheader21 ]
-  %pSrcA.addr.011 = phi i16* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
-  %pSrcB.addr.010 = phi i16* [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
-  %pDst.addr.09 = phi i16* [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
-  %incdec.ptr = getelementptr inbounds i16, i16* %pSrcA.addr.011, i32 1
-  %11 = load i16, i16* %pSrcA.addr.011, align 2
-  %conv = zext i16 %11 to i32
-  %incdec.ptr1 = getelementptr inbounds i16, i16* %pSrcB.addr.010, i32 1
-  %12 = load i16, i16* %pSrcB.addr.010, align 2
-  %conv2 = zext i16 %12 to i32
+  %pSrcA.addr.011 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
+  %pSrcB.addr.010 = phi ptr [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
+  %pDst.addr.09 = phi ptr [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
+  %incdec.ptr = getelementptr inbounds i16, ptr %pSrcA.addr.011, i32 1
+  %8 = load i16, ptr %pSrcA.addr.011, align 2
+  %conv = zext i16 %8 to i32
+  %incdec.ptr1 = getelementptr inbounds i16, ptr %pSrcB.addr.010, i32 1
+  %9 = load i16, ptr %pSrcB.addr.010, align 2
+  %conv2 = zext i16 %9 to i32
   %mul = mul nuw i32 %conv2, %conv
   %shr = lshr i32 %mul, 15
-  %13 = icmp ult i32 %shr, 65535
-  %retval.0.i = select i1 %13, i32 %shr, i32 65535
+  %10 = icmp ult i32 %shr, 65535
+  %retval.0.i = select i1 %10, i32 %shr, i32 65535
   %conv3 = trunc i32 %retval.0.i to i16
-  %incdec.ptr4 = getelementptr inbounds i16, i16* %pDst.addr.09, i32 1
-  store i16 %conv3, i16* %pDst.addr.09, align 2
+  %incdec.ptr4 = getelementptr inbounds i16, ptr %pDst.addr.09, i32 1
+  store i16 %conv3, ptr %pDst.addr.09, align 2
   %inc = add nuw i32 %i.012, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @usatmul_8_q15(i16* nocapture readonly %pSrcA, i16* nocapture readonly %pSrcB, i16* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @usatmul_8_q15(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: usatmul_8_q15:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -1845,39 +1809,36 @@ for.body.preheader:                               ; preds = %entry
 
 for.body.preheader21:                             ; preds = %middle.block, %for.body.preheader
   %i.012.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.011.ph = phi i16* [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.010.ph = phi i16* [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
-  %pDst.addr.09.ph = phi i16* [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
+  %pSrcA.addr.011.ph = phi ptr [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.010.ph = phi ptr [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
+  %pDst.addr.09.ph = phi ptr [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
   br label %for.body
 
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i32 %N, -8
-  %ind.end = getelementptr i16, i16* %pSrcA, i32 %n.vec
-  %ind.end15 = getelementptr i16, i16* %pSrcB, i32 %n.vec
-  %ind.end17 = getelementptr i16, i16* %pDst, i32 %n.vec
+  %ind.end = getelementptr i16, ptr %pSrcA, i32 %n.vec
+  %ind.end15 = getelementptr i16, ptr %pSrcB, i32 %n.vec
+  %ind.end17 = getelementptr i16, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i16, i16* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i16, i16* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i16, i16* %pDst, i32 %index
-  %0 = bitcast i16* %next.gep to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %0, align 2
-  %1 = zext <8 x i16> %wide.load to <8 x i32>
-  %2 = bitcast i16* %next.gep18 to <8 x i16>*
-  %wide.load20 = load <8 x i16>, <8 x i16>* %2, align 2
-  %3 = zext <8 x i16> %wide.load20 to <8 x i32>
-  %4 = mul nuw <8 x i32> %3, %1
-  %5 = lshr <8 x i32> %4, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
-  %6 = icmp ult <8 x i32> %5, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
-  %7 = select <8 x i1> %6, <8 x i32> %5, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
-  %8 = trunc <8 x i32> %7 to <8 x i16>
-  %9 = bitcast i16* %next.gep19 to <8 x i16>*
-  store <8 x i16> %8, <8 x i16>* %9, align 2
+  %next.gep = getelementptr i16, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i16, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i16, ptr %pDst, i32 %index
+  %wide.load = load <8 x i16>, ptr %next.gep, align 2
+  %0 = zext <8 x i16> %wide.load to <8 x i32>
+  %wide.load20 = load <8 x i16>, ptr %next.gep18, align 2
+  %1 = zext <8 x i16> %wide.load20 to <8 x i32>
+  %2 = mul nuw <8 x i32> %1, %0
+  %3 = lshr <8 x i32> %2, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %4 = icmp ult <8 x i32> %3, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+  %5 = select <8 x i1> %4, <8 x i32> %3, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+  %6 = trunc <8 x i32> %5 to <8 x i16>
+  store <8 x i16> %6, ptr %next.gep19, align 2
   %index.next = add i32 %index, 8
-  %10 = icmp eq i32 %index.next, %n.vec
-  br i1 %10, label %middle.block, label %vector.body
+  %7 = icmp eq i32 %index.next, %n.vec
+  br i1 %7, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -1888,22 +1849,22 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader21, %for.body
   %i.012 = phi i32 [ %inc, %for.body ], [ %i.012.ph, %for.body.preheader21 ]
-  %pSrcA.addr.011 = phi i16* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
-  %pSrcB.addr.010 = phi i16* [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
-  %pDst.addr.09 = phi i16* [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
-  %incdec.ptr = getelementptr inbounds i16, i16* %pSrcA.addr.011, i32 1
-  %11 = load i16, i16* %pSrcA.addr.011, align 2
-  %conv = zext i16 %11 to i32
-  %incdec.ptr1 = getelementptr inbounds i16, i16* %pSrcB.addr.010, i32 1
-  %12 = load i16, i16* %pSrcB.addr.010, align 2
-  %conv2 = zext i16 %12 to i32
+  %pSrcA.addr.011 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
+  %pSrcB.addr.010 = phi ptr [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
+  %pDst.addr.09 = phi ptr [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
+  %incdec.ptr = getelementptr inbounds i16, ptr %pSrcA.addr.011, i32 1
+  %8 = load i16, ptr %pSrcA.addr.011, align 2
+  %conv = zext i16 %8 to i32
+  %incdec.ptr1 = getelementptr inbounds i16, ptr %pSrcB.addr.010, i32 1
+  %9 = load i16, ptr %pSrcB.addr.010, align 2
+  %conv2 = zext i16 %9 to i32
   %mul = mul nuw i32 %conv2, %conv
   %shr = lshr i32 %mul, 15
-  %13 = icmp ult i32 %shr, 65535
-  %retval.0.i = select i1 %13, i32 %shr, i32 65535
+  %10 = icmp ult i32 %shr, 65535
+  %retval.0.i = select i1 %10, i32 %shr, i32 65535
   %conv3 = trunc i32 %retval.0.i to i16
-  %incdec.ptr4 = getelementptr inbounds i16, i16* %pDst.addr.09, i32 1
-  store i16 %conv3, i16* %pDst.addr.09, align 2
+  %incdec.ptr4 = getelementptr inbounds i16, ptr %pDst.addr.09, i32 1
+  store i16 %conv3, ptr %pDst.addr.09, align 2
   %inc = add nuw i32 %i.012, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
@@ -1912,7 +1873,7 @@ for.body:                                         ; preds = %for.body.preheader2
 
 ; i8
 
-define arm_aapcs_vfpcc void @ssatmul_4_q7(i8* nocapture readonly %pSrcA, i8* nocapture readonly %pSrcB, i8* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_4_q7(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_4_q7:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -1974,41 +1935,38 @@ for.body.preheader:                               ; preds = %entry
 
 for.body.preheader21:                             ; preds = %middle.block, %for.body.preheader
   %i.012.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.011.ph = phi i8* [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.010.ph = phi i8* [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
-  %pDst.addr.09.ph = phi i8* [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
+  %pSrcA.addr.011.ph = phi ptr [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.010.ph = phi ptr [ %pSrcB, %for.body.preheader ], [ %ind.end15, %middle.block ]
+  %pDst.addr.09.ph = phi ptr [ %pDst, %for.body.preheader ], [ %ind.end17, %middle.block ]
   br label %for.body
 
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i32 %N, -4
-  %ind.end = getelementptr i8, i8* %pSrcA, i32 %n.vec
-  %ind.end15 = getelementptr i8, i8* %pSrcB, i32 %n.vec
-  %ind.end17 = getelementptr i8, i8* %pDst, i32 %n.vec
+  %ind.end = getelementptr i8, ptr %pSrcA, i32 %n.vec
+  %ind.end15 = getelementptr i8, ptr %pSrcB, i32 %n.vec
+  %ind.end17 = getelementptr i8, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i8, i8* %pSrcA, i32 %index
-  %next.gep18 = getelementptr i8, i8* %pSrcB, i32 %index
-  %next.gep19 = getelementptr i8, i8* %pDst, i32 %index
-  %0 = bitcast i8* %next.gep to <4 x i8>*
-  %wide.load = load <4 x i8>, <4 x i8>* %0, align 1
-  %1 = sext <4 x i8> %wide.load to <4 x i32>
-  %2 = bitcast i8* %next.gep18 to <4 x i8>*
-  %wide.load20 = load <4 x i8>, <4 x i8>* %2, align 1
-  %3 = sext <4 x i8> %wide.load20 to <4 x i32>
-  %4 = mul nsw <4 x i32> %3, %1
-  %5 = ashr <4 x i32> %4, <i32 7, i32 7, i32 7, i32 7>
-  %6 = icmp sgt <4 x i32> %5, <i32 -128, i32 -128, i32 -128, i32 -128>
-  %7 = select <4 x i1> %6, <4 x i32> %5, <4 x i32> <i32 -128, i32 -128, i32 -128, i32 -128>
-  %8 = icmp slt <4 x i32> %7, <i32 127, i32 127, i32 127, i32 127>
-  %9 = select <4 x i1> %8, <4 x i32> %7, <4 x i32> <i32 127, i32 127, i32 127, i32 127>
-  %10 = trunc <4 x i32> %9 to <4 x i8>
-  %11 = bitcast i8* %next.gep19 to <4 x i8>*
-  store <4 x i8> %10, <4 x i8>* %11, align 1
+  %next.gep = getelementptr i8, ptr %pSrcA, i32 %index
+  %next.gep18 = getelementptr i8, ptr %pSrcB, i32 %index
+  %next.gep19 = getelementptr i8, ptr %pDst, i32 %index
+  %wide.load = load <4 x i8>, ptr %next.gep, align 1
+  %0 = sext <4 x i8> %wide.load to <4 x i32>
+  %wide.load20 = load <4 x i8>, ptr %next.gep18, align 1
+  %1 = sext <4 x i8> %wide.load20 to <4 x i32>
+  %2 = mul nsw <4 x i32> %1, %0
+  %3 = ashr <4 x i32> %2, <i32 7, i32 7, i32 7, i32 7>
+  %4 = icmp sgt <4 x i32> %3, <i32 -128, i32 -128, i32 -128, i32 -128>
+  %5 = select <4 x i1> %4, <4 x i32> %3, <4 x i32> <i32 -128, i32 -128, i32 -128, i32 -128>
+  %6 = icmp slt <4 x i32> %5, <i32 127, i32 127, i32 127, i32 127>
+  %7 = select <4 x i1> %6, <4 x i32> %5, <4 x i32> <i32 127, i32 127, i32 127, i32 127>
+  %8 = trunc <4 x i32> %7 to <4 x i8>
+  store <4 x i8> %8, ptr %next.gep19, align 1
   %index.next = add i32 %index, 4
-  %12 = icmp eq i32 %index.next, %n.vec
-  br i1 %12, label %middle.block, label %vector.body
+  %9 = icmp eq i32 %index.next, %n.vec
+  br i1 %9, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -2019,30 +1977,30 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader21, %for.body
   %i.012 = phi i32 [ %inc, %for.body ], [ %i.012.ph, %for.body.preheader21 ]
-  %pSrcA.addr.011 = phi i8* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
-  %pSrcB.addr.010 = phi i8* [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
-  %pDst.addr.09 = phi i8* [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
-  %incdec.ptr = getelementptr inbounds i8, i8* %pSrcA.addr.011, i32 1
-  %13 = load i8, i8* %pSrcA.addr.011, align 1
-  %conv = sext i8 %13 to i32
-  %incdec.ptr1 = getelementptr inbounds i8, i8* %pSrcB.addr.010, i32 1
-  %14 = load i8, i8* %pSrcB.addr.010, align 1
-  %conv2 = sext i8 %14 to i32
+  %pSrcA.addr.011 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.011.ph, %for.body.preheader21 ]
+  %pSrcB.addr.010 = phi ptr [ %incdec.ptr1, %for.body ], [ %pSrcB.addr.010.ph, %for.body.preheader21 ]
+  %pDst.addr.09 = phi ptr [ %incdec.ptr4, %for.body ], [ %pDst.addr.09.ph, %for.body.preheader21 ]
+  %incdec.ptr = getelementptr inbounds i8, ptr %pSrcA.addr.011, i32 1
+  %10 = load i8, ptr %pSrcA.addr.011, align 1
+  %conv = sext i8 %10 to i32
+  %incdec.ptr1 = getelementptr inbounds i8, ptr %pSrcB.addr.010, i32 1
+  %11 = load i8, ptr %pSrcB.addr.010, align 1
+  %conv2 = sext i8 %11 to i32
   %mul = mul nsw i32 %conv2, %conv
   %shr = ashr i32 %mul, 7
-  %15 = icmp sgt i32 %shr, -128
-  %.val.i = select i1 %15, i32 %shr, i32 -128
-  %16 = icmp slt i32 %.val.i, 127
-  %retval.0.i = select i1 %16, i32 %.val.i, i32 127
+  %12 = icmp sgt i32 %shr, -128
+  %.val.i = select i1 %12, i32 %shr, i32 -128
+  %13 = icmp slt i32 %.val.i, 127
+  %retval.0.i = select i1 %13, i32 %.val.i, i32 127
   %conv3 = trunc i32 %retval.0.i to i8
-  %incdec.ptr4 = getelementptr inbounds i8, i8* %pDst.addr.09, i32 1
-  store i8 %conv3, i8* %pDst.addr.09, align 1
+  %incdec.ptr4 = getelementptr inbounds i8, ptr %pDst.addr.09, i32 1
+  store i8 %conv3, ptr %pDst.addr.09, align 1
   %inc = add nuw i32 %i.012, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @ssatmul_8_q7(i8* nocapture readonly %pSrcA, i8* nocapture readonly %pSrcB, i8* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_8_q7(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_8_q7:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -2099,41 +2057,38 @@ for.body.preheader:                               ; preds = %entry
 
 for.body.preheader23:                             ; preds = %middle.block, %for.body.preheader
   %i.014.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.013.ph = phi i8* [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.012.ph = phi i8* [ %pSrcB, %for.body.preheader ], [ %ind.end17, %middle.block ]
-  %pDst.addr.011.ph = phi i8* [ %pDst, %for.body.preheader ], [ %ind.end19, %middle.block ]
+  %pSrcA.addr.013.ph = phi ptr [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.012.ph = phi ptr [ %pSrcB, %for.body.preheader ], [ %ind.end17, %middle.block ]
+  %pDst.addr.011.ph = phi ptr [ %pDst, %for.body.preheader ], [ %ind.end19, %middle.block ]
   br label %for.body
 
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i32 %N, -8
-  %ind.end = getelementptr i8, i8* %pSrcA, i32 %n.vec
-  %ind.end17 = getelementptr i8, i8* %pSrcB, i32 %n.vec
-  %ind.end19 = getelementptr i8, i8* %pDst, i32 %n.vec
+  %ind.end = getelementptr i8, ptr %pSrcA, i32 %n.vec
+  %ind.end17 = getelementptr i8, ptr %pSrcB, i32 %n.vec
+  %ind.end19 = getelementptr i8, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i8, i8* %pSrcA, i32 %index
-  %next.gep20 = getelementptr i8, i8* %pSrcB, i32 %index
-  %next.gep21 = getelementptr i8, i8* %pDst, i32 %index
-  %0 = bitcast i8* %next.gep to <8 x i8>*
-  %wide.load = load <8 x i8>, <8 x i8>* %0, align 1
-  %1 = sext <8 x i8> %wide.load to <8 x i16>
-  %2 = bitcast i8* %next.gep20 to <8 x i8>*
-  %wide.load22 = load <8 x i8>, <8 x i8>* %2, align 1
-  %3 = sext <8 x i8> %wide.load22 to <8 x i16>
-  %4 = mul nsw <8 x i16> %3, %1
-  %5 = ashr <8 x i16> %4, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
-  %6 = icmp sgt <8 x i16> %5, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %7 = select <8 x i1> %6, <8 x i16> %5, <8 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %8 = icmp slt <8 x i16> %7, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %9 = select <8 x i1> %8, <8 x i16> %7, <8 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %10 = trunc <8 x i16> %9 to <8 x i8>
-  %11 = bitcast i8* %next.gep21 to <8 x i8>*
-  store <8 x i8> %10, <8 x i8>* %11, align 1
+  %next.gep = getelementptr i8, ptr %pSrcA, i32 %index
+  %next.gep20 = getelementptr i8, ptr %pSrcB, i32 %index
+  %next.gep21 = getelementptr i8, ptr %pDst, i32 %index
+  %wide.load = load <8 x i8>, ptr %next.gep, align 1
+  %0 = sext <8 x i8> %wide.load to <8 x i16>
+  %wide.load22 = load <8 x i8>, ptr %next.gep20, align 1
+  %1 = sext <8 x i8> %wide.load22 to <8 x i16>
+  %2 = mul nsw <8 x i16> %1, %0
+  %3 = ashr <8 x i16> %2, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  %4 = icmp sgt <8 x i16> %3, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %6 = icmp slt <8 x i16> %5, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %7 = select <8 x i1> %6, <8 x i16> %5, <8 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %8 = trunc <8 x i16> %7 to <8 x i8>
+  store <8 x i8> %8, ptr %next.gep21, align 1
   %index.next = add i32 %index, 8
-  %12 = icmp eq i32 %index.next, %n.vec
-  br i1 %12, label %middle.block, label %vector.body
+  %9 = icmp eq i32 %index.next, %n.vec
+  br i1 %9, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -2144,30 +2099,30 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader23, %for.body
   %i.014 = phi i32 [ %inc, %for.body ], [ %i.014.ph, %for.body.preheader23 ]
-  %pSrcA.addr.013 = phi i8* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.013.ph, %for.body.preheader23 ]
-  %pSrcB.addr.012 = phi i8* [ %incdec.ptr2, %for.body ], [ %pSrcB.addr.012.ph, %for.body.preheader23 ]
-  %pDst.addr.011 = phi i8* [ %incdec.ptr6, %for.body ], [ %pDst.addr.011.ph, %for.body.preheader23 ]
-  %incdec.ptr = getelementptr inbounds i8, i8* %pSrcA.addr.013, i32 1
-  %13 = load i8, i8* %pSrcA.addr.013, align 1
-  %conv1 = sext i8 %13 to i16
-  %incdec.ptr2 = getelementptr inbounds i8, i8* %pSrcB.addr.012, i32 1
-  %14 = load i8, i8* %pSrcB.addr.012, align 1
-  %conv3 = sext i8 %14 to i16
+  %pSrcA.addr.013 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.013.ph, %for.body.preheader23 ]
+  %pSrcB.addr.012 = phi ptr [ %incdec.ptr2, %for.body ], [ %pSrcB.addr.012.ph, %for.body.preheader23 ]
+  %pDst.addr.011 = phi ptr [ %incdec.ptr6, %for.body ], [ %pDst.addr.011.ph, %for.body.preheader23 ]
+  %incdec.ptr = getelementptr inbounds i8, ptr %pSrcA.addr.013, i32 1
+  %10 = load i8, ptr %pSrcA.addr.013, align 1
+  %conv1 = sext i8 %10 to i16
+  %incdec.ptr2 = getelementptr inbounds i8, ptr %pSrcB.addr.012, i32 1
+  %11 = load i8, ptr %pSrcB.addr.012, align 1
+  %conv3 = sext i8 %11 to i16
   %mul = mul nsw i16 %conv3, %conv1
   %shr = ashr i16 %mul, 7
-  %15 = icmp sgt i16 %shr, -128
-  %.val.i = select i1 %15, i16 %shr, i16 -128
-  %16 = icmp slt i16 %.val.i, 127
-  %retval.0.i = select i1 %16, i16 %.val.i, i16 127
+  %12 = icmp sgt i16 %shr, -128
+  %.val.i = select i1 %12, i16 %shr, i16 -128
+  %13 = icmp slt i16 %.val.i, 127
+  %retval.0.i = select i1 %13, i16 %.val.i, i16 127
   %conv5 = trunc i16 %retval.0.i to i8
-  %incdec.ptr6 = getelementptr inbounds i8, i8* %pDst.addr.011, i32 1
-  store i8 %conv5, i8* %pDst.addr.011, align 1
+  %incdec.ptr6 = getelementptr inbounds i8, ptr %pDst.addr.011, i32 1
+  store i8 %conv5, ptr %pDst.addr.011, align 1
   %inc = add nuw i32 %i.014, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @ssatmul_16_q7(i8* nocapture readonly %pSrcA, i8* nocapture readonly %pSrcB, i8* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_16_q7(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_16_q7:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -2226,41 +2181,38 @@ for.body.preheader:                               ; preds = %entry
 
 for.body.preheader23:                             ; preds = %middle.block, %for.body.preheader
   %i.014.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.013.ph = phi i8* [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.012.ph = phi i8* [ %pSrcB, %for.body.preheader ], [ %ind.end17, %middle.block ]
-  %pDst.addr.011.ph = phi i8* [ %pDst, %for.body.preheader ], [ %ind.end19, %middle.block ]
+  %pSrcA.addr.013.ph = phi ptr [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.012.ph = phi ptr [ %pSrcB, %for.body.preheader ], [ %ind.end17, %middle.block ]
+  %pDst.addr.011.ph = phi ptr [ %pDst, %for.body.preheader ], [ %ind.end19, %middle.block ]
   br label %for.body
 
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i32 %N, -16
-  %ind.end = getelementptr i8, i8* %pSrcA, i32 %n.vec
-  %ind.end17 = getelementptr i8, i8* %pSrcB, i32 %n.vec
-  %ind.end19 = getelementptr i8, i8* %pDst, i32 %n.vec
+  %ind.end = getelementptr i8, ptr %pSrcA, i32 %n.vec
+  %ind.end17 = getelementptr i8, ptr %pSrcB, i32 %n.vec
+  %ind.end19 = getelementptr i8, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i8, i8* %pSrcA, i32 %index
-  %next.gep20 = getelementptr i8, i8* %pSrcB, i32 %index
-  %next.gep21 = getelementptr i8, i8* %pDst, i32 %index
-  %0 = bitcast i8* %next.gep to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %0, align 1
-  %1 = sext <16 x i8> %wide.load to <16 x i16>
-  %2 = bitcast i8* %next.gep20 to <16 x i8>*
-  %wide.load22 = load <16 x i8>, <16 x i8>* %2, align 1
-  %3 = sext <16 x i8> %wide.load22 to <16 x i16>
-  %4 = mul nsw <16 x i16> %3, %1
-  %5 = ashr <16 x i16> %4, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
-  %6 = icmp sgt <16 x i16> %5, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %7 = select <16 x i1> %6, <16 x i16> %5, <16 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %8 = icmp slt <16 x i16> %7, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %9 = select <16 x i1> %8, <16 x i16> %7, <16 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %10 = trunc <16 x i16> %9 to <16 x i8>
-  %11 = bitcast i8* %next.gep21 to <16 x i8>*
-  store <16 x i8> %10, <16 x i8>* %11, align 1
+  %next.gep = getelementptr i8, ptr %pSrcA, i32 %index
+  %next.gep20 = getelementptr i8, ptr %pSrcB, i32 %index
+  %next.gep21 = getelementptr i8, ptr %pDst, i32 %index
+  %wide.load = load <16 x i8>, ptr %next.gep, align 1
+  %0 = sext <16 x i8> %wide.load to <16 x i16>
+  %wide.load22 = load <16 x i8>, ptr %next.gep20, align 1
+  %1 = sext <16 x i8> %wide.load22 to <16 x i16>
+  %2 = mul nsw <16 x i16> %1, %0
+  %3 = ashr <16 x i16> %2, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  %4 = icmp sgt <16 x i16> %3, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %6 = icmp slt <16 x i16> %5, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %7 = select <16 x i1> %6, <16 x i16> %5, <16 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %8 = trunc <16 x i16> %7 to <16 x i8>
+  store <16 x i8> %8, ptr %next.gep21, align 1
   %index.next = add i32 %index, 16
-  %12 = icmp eq i32 %index.next, %n.vec
-  br i1 %12, label %middle.block, label %vector.body
+  %9 = icmp eq i32 %index.next, %n.vec
+  br i1 %9, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -2271,30 +2223,30 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader23, %for.body
   %i.014 = phi i32 [ %inc, %for.body ], [ %i.014.ph, %for.body.preheader23 ]
-  %pSrcA.addr.013 = phi i8* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.013.ph, %for.body.preheader23 ]
-  %pSrcB.addr.012 = phi i8* [ %incdec.ptr2, %for.body ], [ %pSrcB.addr.012.ph, %for.body.preheader23 ]
-  %pDst.addr.011 = phi i8* [ %incdec.ptr6, %for.body ], [ %pDst.addr.011.ph, %for.body.preheader23 ]
-  %incdec.ptr = getelementptr inbounds i8, i8* %pSrcA.addr.013, i32 1
-  %13 = load i8, i8* %pSrcA.addr.013, align 1
-  %conv1 = sext i8 %13 to i16
-  %incdec.ptr2 = getelementptr inbounds i8, i8* %pSrcB.addr.012, i32 1
-  %14 = load i8, i8* %pSrcB.addr.012, align 1
-  %conv3 = sext i8 %14 to i16
+  %pSrcA.addr.013 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.013.ph, %for.body.preheader23 ]
+  %pSrcB.addr.012 = phi ptr [ %incdec.ptr2, %for.body ], [ %pSrcB.addr.012.ph, %for.body.preheader23 ]
+  %pDst.addr.011 = phi ptr [ %incdec.ptr6, %for.body ], [ %pDst.addr.011.ph, %for.body.preheader23 ]
+  %incdec.ptr = getelementptr inbounds i8, ptr %pSrcA.addr.013, i32 1
+  %10 = load i8, ptr %pSrcA.addr.013, align 1
+  %conv1 = sext i8 %10 to i16
+  %incdec.ptr2 = getelementptr inbounds i8, ptr %pSrcB.addr.012, i32 1
+  %11 = load i8, ptr %pSrcB.addr.012, align 1
+  %conv3 = sext i8 %11 to i16
   %mul = mul nsw i16 %conv3, %conv1
   %shr = ashr i16 %mul, 7
-  %15 = icmp sgt i16 %shr, -128
-  %.val.i = select i1 %15, i16 %shr, i16 -128
-  %16 = icmp slt i16 %.val.i, 127
-  %retval.0.i = select i1 %16, i16 %.val.i, i16 127
+  %12 = icmp sgt i16 %shr, -128
+  %.val.i = select i1 %12, i16 %shr, i16 -128
+  %13 = icmp slt i16 %.val.i, 127
+  %retval.0.i = select i1 %13, i16 %.val.i, i16 127
   %conv5 = trunc i16 %retval.0.i to i8
-  %incdec.ptr6 = getelementptr inbounds i8, i8* %pDst.addr.011, i32 1
-  store i8 %conv5, i8* %pDst.addr.011, align 1
+  %incdec.ptr6 = getelementptr inbounds i8, ptr %pDst.addr.011, i32 1
+  store i8 %conv5, ptr %pDst.addr.011, align 1
   %inc = add nuw i32 %i.014, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @ssatmul_16i_q7(i8* nocapture readonly %pSrcA, i8* nocapture readonly %pSrcB, i8* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_16i_q7(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_16i_q7:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -2353,54 +2305,51 @@ for.body.preheader:                               ; preds = %entry
 
 for.body.preheader23:                             ; preds = %middle.block, %for.body.preheader
   %i.014.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.013.ph = phi i8* [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.012.ph = phi i8* [ %pSrcB, %for.body.preheader ], [ %ind.end17, %middle.block ]
-  %pDst.addr.011.ph = phi i8* [ %pDst, %for.body.preheader ], [ %ind.end19, %middle.block ]
+  %pSrcA.addr.013.ph = phi ptr [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.012.ph = phi ptr [ %pSrcB, %for.body.preheader ], [ %ind.end17, %middle.block ]
+  %pDst.addr.011.ph = phi ptr [ %pDst, %for.body.preheader ], [ %ind.end19, %middle.block ]
   br label %for.body
 
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i32 %N, -16
-  %ind.end = getelementptr i8, i8* %pSrcA, i32 %n.vec
-  %ind.end17 = getelementptr i8, i8* %pSrcB, i32 %n.vec
-  %ind.end19 = getelementptr i8, i8* %pDst, i32 %n.vec
+  %ind.end = getelementptr i8, ptr %pSrcA, i32 %n.vec
+  %ind.end17 = getelementptr i8, ptr %pSrcB, i32 %n.vec
+  %ind.end19 = getelementptr i8, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i8, i8* %pSrcA, i32 %index
-  %next.gep20 = getelementptr i8, i8* %pSrcB, i32 %index
-  %next.gep21 = getelementptr i8, i8* %pDst, i32 %index
-  %0 = bitcast i8* %next.gep to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %0, align 1
-  %1 = shufflevector <16 x i8> %wide.load, <16 x i8> %wide.load, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %2 = shufflevector <16 x i8> %wide.load, <16 x i8> %wide.load, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %next.gep = getelementptr i8, ptr %pSrcA, i32 %index
+  %next.gep20 = getelementptr i8, ptr %pSrcB, i32 %index
+  %next.gep21 = getelementptr i8, ptr %pDst, i32 %index
+  %wide.load = load <16 x i8>, ptr %next.gep, align 1
+  %0 = shufflevector <16 x i8> %wide.load, <16 x i8> %wide.load, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %1 = shufflevector <16 x i8> %wide.load, <16 x i8> %wide.load, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %2 = sext <8 x i8> %0 to <8 x i16>
   %3 = sext <8 x i8> %1 to <8 x i16>
-  %4 = sext <8 x i8> %2 to <8 x i16>
-  %5 = bitcast i8* %next.gep20 to <16 x i8>*
-  %wide.load22 = load <16 x i8>, <16 x i8>* %5, align 1
-  %6 = shufflevector <16 x i8> %wide.load22, <16 x i8> %wide.load22, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %7 = shufflevector <16 x i8> %wide.load22, <16 x i8> %wide.load22, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-  %8 = sext <8 x i8> %6 to <8 x i16>
-  %9 = sext <8 x i8> %7 to <8 x i16>
-  %10 = mul <8 x i16> %8, %3
-  %11 = mul <8 x i16> %9, %4
-  %12 = ashr <8 x i16> %10, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
-  %13 = ashr <8 x i16> %11, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
-  %14 = icmp sgt <8 x i16> %12, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %15 = icmp sgt <8 x i16> %13, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %16 = select <8 x i1> %14, <8 x i16> %12, <8 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %17 = select <8 x i1> %15, <8 x i16> %13, <8 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %18 = icmp slt <8 x i16> %16, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %19 = icmp slt <8 x i16> %17, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %20 = select <8 x i1> %18, <8 x i16> %16, <8 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %21 = select <8 x i1> %19, <8 x i16> %17, <8 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %22 = shufflevector <8 x i16> %20, <8 x i16> %21, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-  %23 = trunc <16 x i16> %22 to <16 x i8>
-  %24 = bitcast i8* %next.gep21 to <16 x i8>*
-  store <16 x i8> %23, <16 x i8>* %24, align 1
+  %wide.load22 = load <16 x i8>, ptr %next.gep20, align 1
+  %4 = shufflevector <16 x i8> %wide.load22, <16 x i8> %wide.load22, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %5 = shufflevector <16 x i8> %wide.load22, <16 x i8> %wide.load22, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %6 = sext <8 x i8> %4 to <8 x i16>
+  %7 = sext <8 x i8> %5 to <8 x i16>
+  %8 = mul <8 x i16> %6, %2
+  %9 = mul <8 x i16> %7, %3
+  %10 = ashr <8 x i16> %8, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  %11 = ashr <8 x i16> %9, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  %12 = icmp sgt <8 x i16> %10, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %13 = icmp sgt <8 x i16> %11, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %14 = select <8 x i1> %12, <8 x i16> %10, <8 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %15 = select <8 x i1> %13, <8 x i16> %11, <8 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %16 = icmp slt <8 x i16> %14, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %17 = icmp slt <8 x i16> %15, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %18 = select <8 x i1> %16, <8 x i16> %14, <8 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %19 = select <8 x i1> %17, <8 x i16> %15, <8 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %20 = shufflevector <8 x i16> %18, <8 x i16> %19, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  %21 = trunc <16 x i16> %20 to <16 x i8>
+  store <16 x i8> %21, ptr %next.gep21, align 1
   %index.next = add i32 %index, 16
-  %25 = icmp eq i32 %index.next, %n.vec
-  br i1 %25, label %middle.block, label %vector.body
+  %22 = icmp eq i32 %index.next, %n.vec
+  br i1 %22, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -2411,30 +2360,30 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body, %for.body.preheader23
   %i.014 = phi i32 [ %inc, %for.body ], [ %i.014.ph, %for.body.preheader23 ]
-  %pSrcA.addr.013 = phi i8* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.013.ph, %for.body.preheader23 ]
-  %pSrcB.addr.012 = phi i8* [ %incdec.ptr2, %for.body ], [ %pSrcB.addr.012.ph, %for.body.preheader23 ]
-  %pDst.addr.011 = phi i8* [ %incdec.ptr6, %for.body ], [ %pDst.addr.011.ph, %for.body.preheader23 ]
-  %incdec.ptr = getelementptr inbounds i8, i8* %pSrcA.addr.013, i32 1
-  %26 = load i8, i8* %pSrcA.addr.013, align 1
-  %conv1 = sext i8 %26 to i16
-  %incdec.ptr2 = getelementptr inbounds i8, i8* %pSrcB.addr.012, i32 1
-  %27 = load i8, i8* %pSrcB.addr.012, align 1
-  %conv3 = sext i8 %27 to i16
+  %pSrcA.addr.013 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.013.ph, %for.body.preheader23 ]
+  %pSrcB.addr.012 = phi ptr [ %incdec.ptr2, %for.body ], [ %pSrcB.addr.012.ph, %for.body.preheader23 ]
+  %pDst.addr.011 = phi ptr [ %incdec.ptr6, %for.body ], [ %pDst.addr.011.ph, %for.body.preheader23 ]
+  %incdec.ptr = getelementptr inbounds i8, ptr %pSrcA.addr.013, i32 1
+  %23 = load i8, ptr %pSrcA.addr.013, align 1
+  %conv1 = sext i8 %23 to i16
+  %incdec.ptr2 = getelementptr inbounds i8, ptr %pSrcB.addr.012, i32 1
+  %24 = load i8, ptr %pSrcB.addr.012, align 1
+  %conv3 = sext i8 %24 to i16
   %mul = mul nsw i16 %conv3, %conv1
   %shr = ashr i16 %mul, 7
-  %28 = icmp sgt i16 %shr, -128
-  %.val.i = select i1 %28, i16 %shr, i16 -128
-  %29 = icmp slt i16 %.val.i, 127
-  %retval.0.i = select i1 %29, i16 %.val.i, i16 127
+  %25 = icmp sgt i16 %shr, -128
+  %.val.i = select i1 %25, i16 %shr, i16 -128
+  %26 = icmp slt i16 %.val.i, 127
+  %retval.0.i = select i1 %26, i16 %.val.i, i16 127
   %conv5 = trunc i16 %retval.0.i to i8
-  %incdec.ptr6 = getelementptr inbounds i8, i8* %pDst.addr.011, i32 1
-  store i8 %conv5, i8* %pDst.addr.011, align 1
+  %incdec.ptr6 = getelementptr inbounds i8, ptr %pDst.addr.011, i32 1
+  store i8 %conv5, ptr %pDst.addr.011, align 1
   %inc = add nuw i32 %i.014, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @ssatmul_8t_q7(i8* nocapture readonly %pSrcA, i8* nocapture readonly %pSrcB, i8* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_8t_q7(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_8t_q7:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -2520,34 +2469,31 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <8 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <8 x i32> %broadcast.splatinsert, <8 x i32> undef, <8 x i32> zeroinitializer
   %induction = or <8 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  %next.gep = getelementptr i8, i8* %pSrcA, i32 %index
-  %next.gep20 = getelementptr i8, i8* %pSrcB, i32 %index
-  %next.gep21 = getelementptr i8, i8* %pDst, i32 %index
+  %next.gep = getelementptr i8, ptr %pSrcA, i32 %index
+  %next.gep20 = getelementptr i8, ptr %pSrcB, i32 %index
+  %next.gep21 = getelementptr i8, ptr %pDst, i32 %index
   %0 = icmp ule <8 x i32> %induction, %broadcast.splat23
-  %1 = bitcast i8* %next.gep to <8 x i8>*
-  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %1, i32 1, <8 x i1> %0, <8 x i8> undef)
-  %2 = sext <8 x i8> %wide.masked.load to <8 x i16>
-  %3 = bitcast i8* %next.gep20 to <8 x i8>*
-  %wide.masked.load24 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %3, i32 1, <8 x i1> %0, <8 x i8> undef)
-  %4 = sext <8 x i8> %wide.masked.load24 to <8 x i16>
-  %5 = mul nsw <8 x i16> %4, %2
-  %6 = ashr <8 x i16> %5, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
-  %7 = icmp sgt <8 x i16> %6, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %8 = select <8 x i1> %7, <8 x i16> %6, <8 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %9 = icmp slt <8 x i16> %8, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %10 = select <8 x i1> %9, <8 x i16> %8, <8 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %11 = trunc <8 x i16> %10 to <8 x i8>
-  %12 = bitcast i8* %next.gep21 to <8 x i8>*
-  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %11, <8 x i8>* %12, i32 1, <8 x i1> %0)
+  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %next.gep, i32 1, <8 x i1> %0, <8 x i8> undef)
+  %1 = sext <8 x i8> %wide.masked.load to <8 x i16>
+  %wide.masked.load24 = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %next.gep20, i32 1, <8 x i1> %0, <8 x i8> undef)
+  %2 = sext <8 x i8> %wide.masked.load24 to <8 x i16>
+  %3 = mul nsw <8 x i16> %2, %1
+  %4 = ashr <8 x i16> %3, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  %5 = icmp sgt <8 x i16> %4, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %6 = select <8 x i1> %5, <8 x i16> %4, <8 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %7 = icmp slt <8 x i16> %6, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %8 = select <8 x i1> %7, <8 x i16> %6, <8 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %9 = trunc <8 x i16> %8 to <8 x i8>
+  call void @llvm.masked.store.v8i8.p0(<8 x i8> %9, ptr %next.gep21, i32 1, <8 x i1> %0)
   %index.next = add i32 %index, 8
-  %13 = icmp eq i32 %index.next, %n.vec
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, %n.vec
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @ssatmul_16t_q7(i8* nocapture readonly %pSrcA, i8* nocapture readonly %pSrcB, i8* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_16t_q7(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_16t_q7:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -2712,34 +2658,31 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <16 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <16 x i32> %broadcast.splatinsert, <16 x i32> undef, <16 x i32> zeroinitializer
   %induction = or <16 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  %next.gep = getelementptr i8, i8* %pSrcA, i32 %index
-  %next.gep20 = getelementptr i8, i8* %pSrcB, i32 %index
-  %next.gep21 = getelementptr i8, i8* %pDst, i32 %index
+  %next.gep = getelementptr i8, ptr %pSrcA, i32 %index
+  %next.gep20 = getelementptr i8, ptr %pSrcB, i32 %index
+  %next.gep21 = getelementptr i8, ptr %pDst, i32 %index
   %0 = icmp ule <16 x i32> %induction, %broadcast.splat23
-  %1 = bitcast i8* %next.gep to <16 x i8>*
-  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %1, i32 1, <16 x i1> %0, <16 x i8> undef)
-  %2 = sext <16 x i8> %wide.masked.load to <16 x i16>
-  %3 = bitcast i8* %next.gep20 to <16 x i8>*
-  %wide.masked.load24 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %3, i32 1, <16 x i1> %0, <16 x i8> undef)
-  %4 = sext <16 x i8> %wide.masked.load24 to <16 x i16>
-  %5 = mul nsw <16 x i16> %4, %2
-  %6 = ashr <16 x i16> %5, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
-  %7 = icmp sgt <16 x i16> %6, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %8 = select <16 x i1> %7, <16 x i16> %6, <16 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %9 = icmp slt <16 x i16> %8, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %10 = select <16 x i1> %9, <16 x i16> %8, <16 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %11 = trunc <16 x i16> %10 to <16 x i8>
-  %12 = bitcast i8* %next.gep21 to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %11, <16 x i8>* %12, i32 1, <16 x i1> %0)
+  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %next.gep, i32 1, <16 x i1> %0, <16 x i8> undef)
+  %1 = sext <16 x i8> %wide.masked.load to <16 x i16>
+  %wide.masked.load24 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %next.gep20, i32 1, <16 x i1> %0, <16 x i8> undef)
+  %2 = sext <16 x i8> %wide.masked.load24 to <16 x i16>
+  %3 = mul nsw <16 x i16> %2, %1
+  %4 = ashr <16 x i16> %3, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  %5 = icmp sgt <16 x i16> %4, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %6 = select <16 x i1> %5, <16 x i16> %4, <16 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %7 = icmp slt <16 x i16> %6, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %8 = select <16 x i1> %7, <16 x i16> %6, <16 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %9 = trunc <16 x i16> %8 to <16 x i8>
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %9, ptr %next.gep21, i32 1, <16 x i1> %0)
   %index.next = add i32 %index, 16
-  %13 = icmp eq i32 %index.next, %n.vec
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, %n.vec
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @ssatmul_16ti_q7(i8* nocapture readonly %pSrcA, i8* nocapture readonly %pSrcB, i8* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @ssatmul_16ti_q7(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: ssatmul_16ti_q7:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -2904,47 +2847,44 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %broadcast.splatinsert = insertelement <16 x i32> undef, i32 %index, i32 0
   %broadcast.splat = shufflevector <16 x i32> %broadcast.splatinsert, <16 x i32> undef, <16 x i32> zeroinitializer
   %induction = or <16 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  %next.gep = getelementptr i8, i8* %pSrcA, i32 %index
-  %next.gep20 = getelementptr i8, i8* %pSrcB, i32 %index
-  %next.gep21 = getelementptr i8, i8* %pDst, i32 %index
+  %next.gep = getelementptr i8, ptr %pSrcA, i32 %index
+  %next.gep20 = getelementptr i8, ptr %pSrcB, i32 %index
+  %next.gep21 = getelementptr i8, ptr %pDst, i32 %index
   %0 = icmp ule <16 x i32> %induction, %broadcast.splat23
-  %1 = bitcast i8* %next.gep to <16 x i8>*
-  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %1, i32 1, <16 x i1> %0, <16 x i8> undef)
-  %2 = shufflevector <16 x i8> %wide.masked.load, <16 x i8> %wide.masked.load, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %3 = shufflevector <16 x i8> %wide.masked.load, <16 x i8> %wide.masked.load, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %next.gep, i32 1, <16 x i1> %0, <16 x i8> undef)
+  %1 = shufflevector <16 x i8> %wide.masked.load, <16 x i8> %wide.masked.load, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %2 = shufflevector <16 x i8> %wide.masked.load, <16 x i8> %wide.masked.load, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %3 = sext <8 x i8> %1 to <8 x i16>
   %4 = sext <8 x i8> %2 to <8 x i16>
-  %5 = sext <8 x i8> %3 to <8 x i16>
-  %6 = bitcast i8* %next.gep20 to <16 x i8>*
-  %wide.masked.load24 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %6, i32 1, <16 x i1> %0, <16 x i8> undef)
-  %7 = shufflevector <16 x i8> %wide.masked.load24, <16 x i8> %wide.masked.load24, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %8 = shufflevector <16 x i8> %wide.masked.load24, <16 x i8> %wide.masked.load24, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-  %9 = sext <8 x i8> %7 to <8 x i16>
-  %10 = sext <8 x i8> %8 to <8 x i16>
-  %11 = mul <8 x i16> %9, %4
-  %12 = mul <8 x i16> %10, %5
-  %13 = ashr <8 x i16> %11, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
-  %14 = ashr <8 x i16> %12, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
-  %15 = icmp sgt <8 x i16> %13, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %16 = icmp sgt <8 x i16> %14, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %17 = select <8 x i1> %15, <8 x i16> %13, <8 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %18 = select <8 x i1> %16, <8 x i16> %14, <8 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
-  %19 = icmp slt <8 x i16> %17, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %20 = icmp slt <8 x i16> %18, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %21 = select <8 x i1> %19, <8 x i16> %17, <8 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %22 = select <8 x i1> %20, <8 x i16> %18, <8 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
-  %23 = shufflevector <8 x i16> %21, <8 x i16> %22, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-  %24 = trunc <16 x i16> %23 to <16 x i8>
-  %25 = bitcast i8* %next.gep21 to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %24, <16 x i8>* %25, i32 1, <16 x i1> %0)
+  %wide.masked.load24 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %next.gep20, i32 1, <16 x i1> %0, <16 x i8> undef)
+  %5 = shufflevector <16 x i8> %wide.masked.load24, <16 x i8> %wide.masked.load24, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %6 = shufflevector <16 x i8> %wide.masked.load24, <16 x i8> %wide.masked.load24, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %7 = sext <8 x i8> %5 to <8 x i16>
+  %8 = sext <8 x i8> %6 to <8 x i16>
+  %9 = mul <8 x i16> %7, %3
+  %10 = mul <8 x i16> %8, %4
+  %11 = ashr <8 x i16> %9, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  %12 = ashr <8 x i16> %10, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  %13 = icmp sgt <8 x i16> %11, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %14 = icmp sgt <8 x i16> %12, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %15 = select <8 x i1> %13, <8 x i16> %11, <8 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %16 = select <8 x i1> %14, <8 x i16> %12, <8 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+  %17 = icmp slt <8 x i16> %15, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %18 = icmp slt <8 x i16> %16, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %19 = select <8 x i1> %17, <8 x i16> %15, <8 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %20 = select <8 x i1> %18, <8 x i16> %16, <8 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+  %21 = shufflevector <8 x i16> %19, <8 x i16> %20, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  %22 = trunc <16 x i16> %21 to <16 x i8>
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %22, ptr %next.gep21, i32 1, <16 x i1> %0)
   %index.next = add i32 %index, 16
-  %26 = icmp eq i32 %index.next, %n.vec
-  br i1 %26, label %for.cond.cleanup, label %vector.body
+  %23 = icmp eq i32 %index.next, %n.vec
+  br i1 %23, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @usatmul_8_q7(i8* nocapture readonly %pSrcA, i8* nocapture readonly %pSrcB, i8* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @usatmul_8_q7(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: usatmul_8_q7:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -3005,39 +2945,36 @@ for.body.preheader:                               ; preds = %entry
 
 for.body.preheader23:                             ; preds = %middle.block, %for.body.preheader
   %i.014.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.013.ph = phi i8* [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.012.ph = phi i8* [ %pSrcB, %for.body.preheader ], [ %ind.end17, %middle.block ]
-  %pDst.addr.011.ph = phi i8* [ %pDst, %for.body.preheader ], [ %ind.end19, %middle.block ]
+  %pSrcA.addr.013.ph = phi ptr [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.012.ph = phi ptr [ %pSrcB, %for.body.preheader ], [ %ind.end17, %middle.block ]
+  %pDst.addr.011.ph = phi ptr [ %pDst, %for.body.preheader ], [ %ind.end19, %middle.block ]
   br label %for.body
 
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i32 %N, -8
-  %ind.end = getelementptr i8, i8* %pSrcA, i32 %n.vec
-  %ind.end17 = getelementptr i8, i8* %pSrcB, i32 %n.vec
-  %ind.end19 = getelementptr i8, i8* %pDst, i32 %n.vec
+  %ind.end = getelementptr i8, ptr %pSrcA, i32 %n.vec
+  %ind.end17 = getelementptr i8, ptr %pSrcB, i32 %n.vec
+  %ind.end19 = getelementptr i8, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i8, i8* %pSrcA, i32 %index
-  %next.gep20 = getelementptr i8, i8* %pSrcB, i32 %index
-  %next.gep21 = getelementptr i8, i8* %pDst, i32 %index
-  %0 = bitcast i8* %next.gep to <8 x i8>*
-  %wide.load = load <8 x i8>, <8 x i8>* %0, align 1
-  %1 = zext <8 x i8> %wide.load to <8 x i16>
-  %2 = bitcast i8* %next.gep20 to <8 x i8>*
-  %wide.load22 = load <8 x i8>, <8 x i8>* %2, align 1
-  %3 = zext <8 x i8> %wide.load22 to <8 x i16>
-  %4 = mul nuw <8 x i16> %3, %1
-  %5 = lshr <8 x i16> %4, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
-  %6 = icmp ult <8 x i16> %5, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
-  %7 = select <8 x i1> %6, <8 x i16> %5, <8 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
-  %8 = trunc <8 x i16> %7 to <8 x i8>
-  %9 = bitcast i8* %next.gep21 to <8 x i8>*
-  store <8 x i8> %8, <8 x i8>* %9, align 1
+  %next.gep = getelementptr i8, ptr %pSrcA, i32 %index
+  %next.gep20 = getelementptr i8, ptr %pSrcB, i32 %index
+  %next.gep21 = getelementptr i8, ptr %pDst, i32 %index
+  %wide.load = load <8 x i8>, ptr %next.gep, align 1
+  %0 = zext <8 x i8> %wide.load to <8 x i16>
+  %wide.load22 = load <8 x i8>, ptr %next.gep20, align 1
+  %1 = zext <8 x i8> %wide.load22 to <8 x i16>
+  %2 = mul nuw <8 x i16> %1, %0
+  %3 = lshr <8 x i16> %2, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  %4 = icmp ult <8 x i16> %3, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+  %5 = select <8 x i1> %4, <8 x i16> %3, <8 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+  %6 = trunc <8 x i16> %5 to <8 x i8>
+  store <8 x i8> %6, ptr %next.gep21, align 1
   %index.next = add i32 %index, 8
-  %10 = icmp eq i32 %index.next, %n.vec
-  br i1 %10, label %middle.block, label %vector.body
+  %7 = icmp eq i32 %index.next, %n.vec
+  br i1 %7, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -3048,28 +2985,28 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader23, %for.body
   %i.014 = phi i32 [ %inc, %for.body ], [ %i.014.ph, %for.body.preheader23 ]
-  %pSrcA.addr.013 = phi i8* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.013.ph, %for.body.preheader23 ]
-  %pSrcB.addr.012 = phi i8* [ %incdec.ptr2, %for.body ], [ %pSrcB.addr.012.ph, %for.body.preheader23 ]
-  %pDst.addr.011 = phi i8* [ %incdec.ptr6, %for.body ], [ %pDst.addr.011.ph, %for.body.preheader23 ]
-  %incdec.ptr = getelementptr inbounds i8, i8* %pSrcA.addr.013, i32 1
-  %11 = load i8, i8* %pSrcA.addr.013, align 1
-  %conv1 = zext i8 %11 to i16
-  %incdec.ptr2 = getelementptr inbounds i8, i8* %pSrcB.addr.012, i32 1
-  %12 = load i8, i8* %pSrcB.addr.012, align 1
-  %conv3 = zext i8 %12 to i16
+  %pSrcA.addr.013 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.013.ph, %for.body.preheader23 ]
+  %pSrcB.addr.012 = phi ptr [ %incdec.ptr2, %for.body ], [ %pSrcB.addr.012.ph, %for.body.preheader23 ]
+  %pDst.addr.011 = phi ptr [ %incdec.ptr6, %for.body ], [ %pDst.addr.011.ph, %for.body.preheader23 ]
+  %incdec.ptr = getelementptr inbounds i8, ptr %pSrcA.addr.013, i32 1
+  %8 = load i8, ptr %pSrcA.addr.013, align 1
+  %conv1 = zext i8 %8 to i16
+  %incdec.ptr2 = getelementptr inbounds i8, ptr %pSrcB.addr.012, i32 1
+  %9 = load i8, ptr %pSrcB.addr.012, align 1
+  %conv3 = zext i8 %9 to i16
   %mul = mul nuw i16 %conv3, %conv1
-  %13 = lshr i16 %mul, 7
-  %14 = icmp ult i16 %13, 255
-  %retval.0.i = select i1 %14, i16 %13, i16 255
+  %10 = lshr i16 %mul, 7
+  %11 = icmp ult i16 %10, 255
+  %retval.0.i = select i1 %11, i16 %10, i16 255
   %conv5 = trunc i16 %retval.0.i to i8
-  %incdec.ptr6 = getelementptr inbounds i8, i8* %pDst.addr.011, i32 1
-  store i8 %conv5, i8* %pDst.addr.011, align 1
+  %incdec.ptr6 = getelementptr inbounds i8, ptr %pDst.addr.011, i32 1
+  store i8 %conv5, ptr %pDst.addr.011, align 1
   %inc = add nuw i32 %i.014, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-define arm_aapcs_vfpcc void @usatmul_16_q7(i8* nocapture readonly %pSrcA, i8* nocapture readonly %pSrcB, i8* noalias nocapture %pDst, i32 %N) {
+define arm_aapcs_vfpcc void @usatmul_16_q7(ptr nocapture readonly %pSrcA, ptr nocapture readonly %pSrcB, ptr noalias nocapture %pDst, i32 %N) {
 ; CHECK-LABEL: usatmul_16_q7:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -3133,39 +3070,36 @@ for.body.preheader:                               ; preds = %entry
 
 for.body.preheader23:                             ; preds = %middle.block, %for.body.preheader
   %i.014.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
-  %pSrcA.addr.013.ph = phi i8* [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %pSrcB.addr.012.ph = phi i8* [ %pSrcB, %for.body.preheader ], [ %ind.end17, %middle.block ]
-  %pDst.addr.011.ph = phi i8* [ %pDst, %for.body.preheader ], [ %ind.end19, %middle.block ]
+  %pSrcA.addr.013.ph = phi ptr [ %pSrcA, %for.body.preheader ], [ %ind.end, %middle.block ]
+  %pSrcB.addr.012.ph = phi ptr [ %pSrcB, %for.body.preheader ], [ %ind.end17, %middle.block ]
+  %pDst.addr.011.ph = phi ptr [ %pDst, %for.body.preheader ], [ %ind.end19, %middle.block ]
   br label %for.body
 
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i32 %N, -16
-  %ind.end = getelementptr i8, i8* %pSrcA, i32 %n.vec
-  %ind.end17 = getelementptr i8, i8* %pSrcB, i32 %n.vec
-  %ind.end19 = getelementptr i8, i8* %pDst, i32 %n.vec
+  %ind.end = getelementptr i8, ptr %pSrcA, i32 %n.vec
+  %ind.end17 = getelementptr i8, ptr %pSrcB, i32 %n.vec
+  %ind.end19 = getelementptr i8, ptr %pDst, i32 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i8, i8* %pSrcA, i32 %index
-  %next.gep20 = getelementptr i8, i8* %pSrcB, i32 %index
-  %next.gep21 = getelementptr i8, i8* %pDst, i32 %index
-  %0 = bitcast i8* %next.gep to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %0, align 1
-  %1 = zext <16 x i8> %wide.load to <16 x i16>
-  %2 = bitcast i8* %next.gep20 to <16 x i8>*
-  %wide.load22 = load <16 x i8>, <16 x i8>* %2, align 1
-  %3 = zext <16 x i8> %wide.load22 to <16 x i16>
-  %4 = mul nuw <16 x i16> %3, %1
-  %5 = lshr <16 x i16> %4, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
-  %6 = icmp ult <16 x i16> %5, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
-  %7 = select <16 x i1> %6, <16 x i16> %5, <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
-  %8 = trunc <16 x i16> %7 to <16 x i8>
-  %9 = bitcast i8* %next.gep21 to <16 x i8>*
-  store <16 x i8> %8, <16 x i8>* %9, align 1
+  %next.gep = getelementptr i8, ptr %pSrcA, i32 %index
+  %next.gep20 = getelementptr i8, ptr %pSrcB, i32 %index
+  %next.gep21 = getelementptr i8, ptr %pDst, i32 %index
+  %wide.load = load <16 x i8>, ptr %next.gep, align 1
+  %0 = zext <16 x i8> %wide.load to <16 x i16>
+  %wide.load22 = load <16 x i8>, ptr %next.gep20, align 1
+  %1 = zext <16 x i8> %wide.load22 to <16 x i16>
+  %2 = mul nuw <16 x i16> %1, %0
+  %3 = lshr <16 x i16> %2, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  %4 = icmp ult <16 x i16> %3, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+  %5 = select <16 x i1> %4, <16 x i16> %3, <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+  %6 = trunc <16 x i16> %5 to <16 x i8>
+  store <16 x i8> %6, ptr %next.gep21, align 1
   %index.next = add i32 %index, 16
-  %10 = icmp eq i32 %index.next, %n.vec
-  br i1 %10, label %middle.block, label %vector.body
+  %7 = icmp eq i32 %index.next, %n.vec
+  br i1 %7, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %N
@@ -3176,34 +3110,34 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader23, %for.body
   %i.014 = phi i32 [ %inc, %for.body ], [ %i.014.ph, %for.body.preheader23 ]
-  %pSrcA.addr.013 = phi i8* [ %incdec.ptr, %for.body ], [ %pSrcA.addr.013.ph, %for.body.preheader23 ]
-  %pSrcB.addr.012 = phi i8* [ %incdec.ptr2, %for.body ], [ %pSrcB.addr.012.ph, %for.body.preheader23 ]
-  %pDst.addr.011 = phi i8* [ %incdec.ptr6, %for.body ], [ %pDst.addr.011.ph, %for.body.preheader23 ]
-  %incdec.ptr = getelementptr inbounds i8, i8* %pSrcA.addr.013, i32 1
-  %11 = load i8, i8* %pSrcA.addr.013, align 1
-  %conv1 = zext i8 %11 to i16
-  %incdec.ptr2 = getelementptr inbounds i8, i8* %pSrcB.addr.012, i32 1
-  %12 = load i8, i8* %pSrcB.addr.012, align 1
-  %conv3 = zext i8 %12 to i16
+  %pSrcA.addr.013 = phi ptr [ %incdec.ptr, %for.body ], [ %pSrcA.addr.013.ph, %for.body.preheader23 ]
+  %pSrcB.addr.012 = phi ptr [ %incdec.ptr2, %for.body ], [ %pSrcB.addr.012.ph, %for.body.preheader23 ]
+  %pDst.addr.011 = phi ptr [ %incdec.ptr6, %for.body ], [ %pDst.addr.011.ph, %for.body.preheader23 ]
+  %incdec.ptr = getelementptr inbounds i8, ptr %pSrcA.addr.013, i32 1
+  %8 = load i8, ptr %pSrcA.addr.013, align 1
+  %conv1 = zext i8 %8 to i16
+  %incdec.ptr2 = getelementptr inbounds i8, ptr %pSrcB.addr.012, i32 1
+  %9 = load i8, ptr %pSrcB.addr.012, align 1
+  %conv3 = zext i8 %9 to i16
   %mul = mul nuw i16 %conv3, %conv1
-  %13 = lshr i16 %mul, 7
-  %14 = icmp ult i16 %13, 255
-  %retval.0.i = select i1 %14, i16 %13, i16 255
+  %10 = lshr i16 %mul, 7
+  %11 = icmp ult i16 %10, 255
+  %retval.0.i = select i1 %11, i16 %10, i16 255
   %conv5 = trunc i16 %retval.0.i to i8
-  %incdec.ptr6 = getelementptr inbounds i8, i8* %pDst.addr.011, i32 1
-  store i8 %conv5, i8* %pDst.addr.011, align 1
+  %incdec.ptr6 = getelementptr inbounds i8, ptr %pDst.addr.011, i32 1
+  store i8 %conv5, ptr %pDst.addr.011, align 1
   %inc = add nuw i32 %i.014, 1
   %exitcond = icmp eq i32 %inc, %N
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
 
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
-declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
-declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32, <4 x i1>, <4 x i16>)
-declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)
-declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v4i16.p0v4i16(<4 x i16>, <4 x i16>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8i8.p0v8i8(<8 x i8>, <8 x i8>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32, <4 x i1>, <4 x i32>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32, <8 x i1>, <8 x i16>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32, <4 x i1>, <4 x i16>)
+declare <8 x i8> @llvm.masked.load.v8i8.p0(ptr, i32, <8 x i1>, <8 x i8>)
+declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32, <16 x i1>, <16 x i8>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i16.p0(<4 x i16>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8i8.p0(<8 x i8>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32, <16 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll b/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
index 6706d73ccb894..94397f0ae587b 100644
--- a/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
@@ -3,7 +3,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst -opaque-pointers %s -o - | FileCheck %s
 
 
-define arm_aapcs_vfpcc void @scatter_inc_minipred_4i32(<4 x i32> %data, i32* %dst, <4 x i32> %offs) {
+define arm_aapcs_vfpcc void @scatter_inc_minipred_4i32(<4 x i32> %data, ptr %dst, <4 x i32> %offs) {
 ; CHECK-LABEL: scatter_inc_minipred_4i32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    movs r1, #4
@@ -14,12 +14,12 @@ define arm_aapcs_vfpcc void @scatter_inc_minipred_4i32(<4 x i32> %data, i32* %ds
 ; CHECK-NEXT:    vstrwt.32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
   %1 = add <4 x i32> %offs, <i32 4, i32 4, i32 4, i32 4>
-  %2 = getelementptr inbounds i32, i32* %dst, <4 x i32> %1
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %data, <4 x i32*> %2, i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 false>)
+  %2 = getelementptr inbounds i32, ptr %dst, <4 x i32> %1
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %data, <4 x ptr> %2, i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 false>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @scatter_inc_mini_8i16(<8 x i16> %data, i16* %dst, <8 x i32> %offs) {
+define arm_aapcs_vfpcc void @scatter_inc_mini_8i16(<8 x i16> %data, ptr %dst, <8 x i32> %offs) {
 ; CHECK-LABEL: scatter_inc_mini_8i16:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -53,12 +53,12 @@ define arm_aapcs_vfpcc void @scatter_inc_mini_8i16(<8 x i16> %data, i16* %dst, <
 ; CHECK-NEXT:    strh r0, [r5]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
   %1 = add <8 x i32> %offs, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
-  %2 = getelementptr inbounds i16, i16* %dst, <8 x i32> %1
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %data, <8 x i16*> %2, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %2 = getelementptr inbounds i16, ptr %dst, <8 x i32> %1
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %data, <8 x ptr> %2, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @scatter_inc_mini_16i8(<16 x i8> %data, i8* %dst, <16 x i32> %offs) {
+define arm_aapcs_vfpcc void @scatter_inc_mini_16i8(<16 x i8> %data, ptr %dst, <16 x i32> %offs) {
 ; CHECK-LABEL: scatter_inc_mini_16i8:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
@@ -119,12 +119,12 @@ define arm_aapcs_vfpcc void @scatter_inc_mini_16i8(<16 x i8> %data, i8* %dst, <1
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
   %1 = add <16 x i32> %offs, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-  %2 = getelementptr inbounds i8, i8* %dst, <16 x i32> %1
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %data, <16 x i8*> %2, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %2 = getelementptr inbounds i8, ptr %dst, <16 x i32> %1
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %data, <16 x ptr> %2, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @scatter_inc_v4i32_complex(<4 x i32> %data1, <4 x i32> %data2, <4 x i32> %data3, i32* %dst, i32 %n) {
+define arm_aapcs_vfpcc void @scatter_inc_v4i32_complex(<4 x i32> %data1, <4 x i32> %data2, <4 x i32> %data3, ptr %dst, i32 %n) {
 ; CHECK-LABEL: scatter_inc_v4i32_complex:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -201,14 +201,14 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
   %0 = mul nuw nsw <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
-  %1 = getelementptr inbounds i32, i32* %dst, <4 x i32> %0
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %data1, <4 x i32*> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %1 = getelementptr inbounds i32, ptr %dst, <4 x i32> %0
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %data1, <4 x ptr> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   %2 = add nuw nsw <4 x i32> %0, <i32 1, i32 1, i32 1, i32 1>
-  %3 = getelementptr inbounds i32, i32* %dst, <4 x i32> %2
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %data2, <4 x i32*> %3, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %3 = getelementptr inbounds i32, ptr %dst, <4 x i32> %2
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %data2, <4 x ptr> %3, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   %4 = add nuw nsw <4 x i32> %0, <i32 2, i32 2, i32 2, i32 2>
-  %5 = getelementptr inbounds i32, i32* %dst, <4 x i32> %4
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %data3, <4 x i32*> %5, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %5 = getelementptr inbounds i32, ptr %dst, <4 x i32> %4
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %data3, <4 x ptr> %5, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   %index.next = add i32 %index, 4
   %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
   %6 = icmp eq i32 %index.next, %n.vec
@@ -222,7 +222,7 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
   ret void
 }
 
-define void @shl(i32* nocapture readonly %x, i32* noalias nocapture %y, i32 %n) {
+define void @shl(ptr nocapture readonly %x, ptr noalias nocapture %y, i32 %n) {
 ; CHECK-LABEL: shl:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -262,22 +262,21 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
-  %2 = shl nsw <4 x i32> %vec.ind, <i32 2, i32 2, i32 2, i32 2>
-  %3 = getelementptr inbounds i32, i32* %y, <4 x i32> %2
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %wide.masked.load, <4 x i32*> %3, i32 4, <4 x i1> %active.lane.mask)
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %0, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
+  %1 = shl nsw <4 x i32> %vec.ind, <i32 2, i32 2, i32 2, i32 2>
+  %2 = getelementptr inbounds i32, ptr %y, <4 x i32> %1
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %wide.masked.load, <4 x ptr> %2, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
   %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
-  %4 = icmp eq i32 %index.next, %n.vec
-  br i1 %4, label %for.cond.cleanup, label %vector.body
+  %3 = icmp eq i32 %index.next, %n.vec
+  br i1 %3, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define void @shlor(i32* nocapture readonly %x, i32* noalias nocapture %y, i32 %n) {
+define void @shlor(ptr nocapture readonly %x, ptr noalias nocapture %y, i32 %n) {
 ; CHECK-LABEL: shlor:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -354,42 +353,41 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
-  %2 = add nsw <4 x i32> %wide.masked.load, <i32 1, i32 1, i32 1, i32 1>
-  %3 = shl nsw <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
-  %4 = getelementptr inbounds i32, i32* %y, <4 x i32> %3
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %2, <4 x i32*> %4, i32 4, <4 x i1> %active.lane.mask)
-  %5 = add nsw <4 x i32> %wide.masked.load, <i32 2, i32 2, i32 2, i32 2>
-  %6 = or <4 x i32> %3, <i32 2, i32 2, i32 2, i32 2>
-  %7 = getelementptr inbounds i32, i32* %y, <4 x i32> %6
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %5, <4 x i32*> %7, i32 4, <4 x i1> %active.lane.mask)
-  %8 = add nsw <4 x i32> %wide.masked.load, <i32 3, i32 3, i32 3, i32 3>
-  %9 = or <4 x i32> %3, <i32 4, i32 4, i32 4, i32 4>
-  %10 = getelementptr inbounds i32, i32* %y, <4 x i32> %9
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %8, <4 x i32*> %10, i32 4, <4 x i1> %active.lane.mask)
-  %11 = add nsw <4 x i32> %wide.masked.load, <i32 4, i32 4, i32 4, i32 4>
-  %12 = or <4 x i32> %3, <i32 6, i32 6, i32 6, i32 6>
-  %13 = getelementptr inbounds i32, i32* %y, <4 x i32> %12
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %11, <4 x i32*> %13, i32 4, <4 x i1> %active.lane.mask)
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %0, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
+  %1 = add nsw <4 x i32> %wide.masked.load, <i32 1, i32 1, i32 1, i32 1>
+  %2 = shl nsw <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
+  %3 = getelementptr inbounds i32, ptr %y, <4 x i32> %2
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %1, <4 x ptr> %3, i32 4, <4 x i1> %active.lane.mask)
+  %4 = add nsw <4 x i32> %wide.masked.load, <i32 2, i32 2, i32 2, i32 2>
+  %5 = or <4 x i32> %2, <i32 2, i32 2, i32 2, i32 2>
+  %6 = getelementptr inbounds i32, ptr %y, <4 x i32> %5
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %4, <4 x ptr> %6, i32 4, <4 x i1> %active.lane.mask)
+  %7 = add nsw <4 x i32> %wide.masked.load, <i32 3, i32 3, i32 3, i32 3>
+  %8 = or <4 x i32> %2, <i32 4, i32 4, i32 4, i32 4>
+  %9 = getelementptr inbounds i32, ptr %y, <4 x i32> %8
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %7, <4 x ptr> %9, i32 4, <4 x i1> %active.lane.mask)
+  %10 = add nsw <4 x i32> %wide.masked.load, <i32 4, i32 4, i32 4, i32 4>
+  %11 = or <4 x i32> %2, <i32 6, i32 6, i32 6, i32 6>
+  %12 = getelementptr inbounds i32, ptr %y, <4 x i32> %11
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %10, <4 x ptr> %12, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
   %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
-  %14 = icmp eq i32 %index.next, %n.vec
-  br i1 %14, label %for.cond.cleanup, label %vector.body
+  %13 = icmp eq i32 %index.next, %n.vec
+  br i1 %13, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-declare void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8>, <8 x i8*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16>, <8 x i16*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half>, <8 x half*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8>, <16 x i8*>, i32, <16 x i1>)
-declare void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8>, <4 x i8*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16>, <4 x i16*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half>, <4 x half*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float>, <4 x float*>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v8i8.v8p0(<8 x i8>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v8i16.v8p0(<8 x i16>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v8f16.v8p0(<8 x half>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v16i8.v16p0(<16 x i8>, <16 x ptr>, i32, <16 x i1>)
+declare void @llvm.masked.scatter.v4i8.v4p0(<4 x i8>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4f16.v4p0(<4 x half>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4f32.v4p0(<4 x float>, <4 x ptr>, i32, <4 x i1>)
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32, <4 x i1>, <4 x i32>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-scatter-ind16-scaled.ll b/llvm/test/CodeGen/Thumb2/mve-scatter-ind16-scaled.ll
index b2c7b22fb8293..709973ba31f3d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-scatter-ind16-scaled.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-scatter-ind16-scaled.ll
@@ -2,53 +2,53 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - -opaque-pointers | FileCheck %s
 
 ; VLDRH.16 Qd, [base, offs, uxtw #1]
-define arm_aapcs_vfpcc void @scaled_v8i16_i16(i16* %base, <8 x i16>* %offptr, <8 x i16> %input) {
+define arm_aapcs_vfpcc void @scaled_v8i16_i16(ptr %base, ptr %offptr, <8 x i16> %input) {
 ; CHECK-LABEL: scaled_v8i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %input, <8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VLDRH.16 Qd, [base, offs, uxtw #1]
-define arm_aapcs_vfpcc void @scaled_v8f16_i16(i16* %base, <8 x i16>* %offptr, <8 x half> %input) {
+define arm_aapcs_vfpcc void @scaled_v8f16_i16(ptr %base, ptr %offptr, <8 x half> %input) {
 ; CHECK-LABEL: scaled_v8f16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %i16_ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i16*> %i16_ptrs to <8 x half*>
-  call void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half> %input, <8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %i16_ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %i16_ptrs to <8 x ptr>
+  call void @llvm.masked.scatter.v8f16.v8p0(<8 x half> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VLDRH.16 Qd, [base, offs, uxtw #1]
-define arm_aapcs_vfpcc void @scaled_v8f16_half(half* %base, <8 x i16>* %offptr, <8 x half> %input) {
+define arm_aapcs_vfpcc void @scaled_v8f16_half(ptr %base, ptr %offptr, <8 x half> %input) {
 ; CHECK-LABEL: scaled_v8f16_half:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds half, half* %base, <8 x i32> %offs.zext
-  call void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half> %input, <8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds half, ptr %base, <8 x i32> %offs.zext
+  call void @llvm.masked.scatter.v8f16.v8p0(<8 x half> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand - sext offset
-define arm_aapcs_vfpcc void @scaled_v8i16_sext(i16* %base, <8 x i16>* %offptr, <8 x i16> %input) {
+define arm_aapcs_vfpcc void @scaled_v8i16_sext(ptr %base, ptr %offptr, <8 x i16> %input) {
 ; CHECK-LABEL: scaled_v8i16_sext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -81,15 +81,15 @@ define arm_aapcs_vfpcc void @scaled_v8i16_sext(i16* %base, <8 x i16>* %offptr, <
 ; CHECK-NEXT:    strh r0, [r5]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.sext = sext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.sext
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %input, <8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.sext
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand - sext offset
-define arm_aapcs_vfpcc void @scaled_v8f16_sext(i16* %base, <8 x i16>* %offptr, <8 x half> %input) {
+define arm_aapcs_vfpcc void @scaled_v8f16_sext(ptr %base, ptr %offptr, <8 x half> %input) {
 ; CHECK-LABEL: scaled_v8f16_sext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
@@ -116,46 +116,46 @@ define arm_aapcs_vfpcc void @scaled_v8f16_sext(i16* %base, <8 x i16>* %offptr, <
 ; CHECK-NEXT:    vstr.16 s0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.sext = sext <8 x i16> %offs to <8 x i32>
-  %i16_ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.sext
-  %ptrs = bitcast <8 x i16*> %i16_ptrs to <8 x half*>
-  call void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half> %input, <8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %i16_ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.sext
+  %ptrs = bitcast <8 x ptr> %i16_ptrs to <8 x ptr>
+  call void @llvm.masked.scatter.v8f16.v8p0(<8 x half> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VLDRH.16 Qd, [base, zext(offs), uxtw #1]
-define arm_aapcs_vfpcc void @unsigned_scaled_v8i16_i8(i16* %base, <8 x i8>* %offptr, <8 x i16> %input) {
+define arm_aapcs_vfpcc void @unsigned_scaled_v8i16_i8(ptr %base, ptr %offptr, <8 x i16> %input) {
 ; CHECK-LABEL: unsigned_scaled_v8i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %input, <8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VLDRH.16 Qd, [base, zext(offs), uxtw #1]
-define arm_aapcs_vfpcc void @unsigned_scaled_v8f16_i8(i16* %base, <8 x i8>* %offptr, <8 x half> %input) {
+define arm_aapcs_vfpcc void @unsigned_scaled_v8f16_i8(ptr %base, ptr %offptr, <8 x half> %input) {
 ; CHECK-LABEL: unsigned_scaled_v8f16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %i16_ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i16*> %i16_ptrs to <8 x half*>
-  call void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half> %input, <8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %i16_ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %i16_ptrs to <8 x ptr>
+  call void @llvm.masked.scatter.v8f16.v8p0(<8 x half> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @scaled_v8i16_i16_passthru_icmp0(i16* %base, <8 x i16>* %offptr, <8 x i16> %input) {
+define arm_aapcs_vfpcc void @scaled_v8i16_i16_passthru_icmp0(ptr %base, ptr %offptr, <8 x i16> %input) {
 ; CHECK-LABEL: scaled_v8i16_i16_passthru_icmp0:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
@@ -163,15 +163,15 @@ define arm_aapcs_vfpcc void @scaled_v8i16_i16_passthru_icmp0(i16* %base, <8 x i1
 ; CHECK-NEXT:    vstrht.16 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i32> %offs.zext
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> %offs.zext
   %mask = icmp sgt <8 x i16> %offs, zeroinitializer
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %input, <8 x i16*> %ptrs, i32 2, <8 x i1> %mask)
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> %mask)
   ret void
 }
 
-define arm_aapcs_vfpcc void @scaled_v8i16_i16_2gep(i16* %base, <8 x i16>* %offptr, <8 x i16> %input) {
+define arm_aapcs_vfpcc void @scaled_v8i16_i16_2gep(ptr %base, ptr %offptr, <8 x i16> %input) {
 ; CHECK-LABEL: scaled_v8i16_i16_2gep:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -207,14 +207,14 @@ define arm_aapcs_vfpcc void @scaled_v8i16_i16_2gep(i16* %base, <8 x i16>* %offpt
 ; CHECK-NEXT:    strh.w r0, [r12]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %offs
-  %ptrs2 = getelementptr inbounds i16, <8 x i16*> %ptrs, i16 20
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %input, <8 x i16*> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <8 x i16>, ptr %offptr, align 2
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i16> %offs
+  %ptrs2 = getelementptr inbounds i16, <8 x ptr> %ptrs, i16 20
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %input, <8 x ptr> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @scaled_v8i16_i16_2gep2(i16* %base, <8 x i16>* %offptr, <8 x i16> %input) {
+define arm_aapcs_vfpcc void @scaled_v8i16_i16_2gep2(ptr %base, ptr %offptr, <8 x i16> %input) {
 ; CHECK-LABEL: scaled_v8i16_i16_2gep2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adr r1, .LCPI9_0
@@ -233,11 +233,11 @@ define arm_aapcs_vfpcc void @scaled_v8i16_i16_2gep2(i16* %base, <8 x i16>* %offp
 ; CHECK-NEXT:    .short 76 @ 0x4c
 ; CHECK-NEXT:    .short 82 @ 0x52
 entry:
-  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> <i16 0, i16 3, i16 6, i16 9, i16 12, i16 15, i16 18, i16 21>
-  %ptrs2 = getelementptr inbounds i16, <8 x i16*> %ptrs, i16 20
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %input, <8 x i16*> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i16> <i16 0, i16 3, i16 6, i16 9, i16 12, i16 15, i16 18, i16 21>
+  %ptrs2 = getelementptr inbounds i16, <8 x ptr> %ptrs, i16 20
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %input, <8 x ptr> %ptrs2, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-declare void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16>, <8 x i16*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half>, <8 x half*>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v8i16.v8p0(<8 x i16>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v8f16.v8p0(<8 x half>, <8 x ptr>, i32, <8 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-scatter-ind16-unscaled.ll b/llvm/test/CodeGen/Thumb2/mve-scatter-ind16-unscaled.ll
index 79a8cd5ad6765..68abc18fc527b 100644
--- a/llvm/test/CodeGen/Thumb2/mve-scatter-ind16-unscaled.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-scatter-ind16-unscaled.ll
@@ -2,71 +2,71 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - -opaque-pointers | FileCheck %s
 
 ; VLDRB.u16 Qd, [base, offs]
-define arm_aapcs_vfpcc void @ext_unscaled_i8_i16(i8* %base, <8 x i16>* %offptr, <8 x i16> %input) {
+define arm_aapcs_vfpcc void @ext_unscaled_i8_i16(ptr %base, ptr %offptr, <8 x i16> %input) {
 ; CHECK-LABEL: ext_unscaled_i8_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
+  %ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
   %t = trunc <8 x i16> %input to <8 x i8>
-  call void @llvm.masked.scatter.v8i8(<8 x i8> %t, <8 x i8*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8i8(<8 x i8> %t, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VLDRB.u16 Qd, [base, offs]
-define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i16_i8(i8* %base, <8 x i8>* %offptr, <8 x i16> %input) {
+define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i16_i8(ptr %base, ptr %offptr, <8 x i16> %input) {
 ; CHECK-LABEL: trunc_unsigned_unscaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
   %input.trunc = trunc <8 x i16> %input to <8 x i8>
-  call void @llvm.masked.scatter.v8i8(<8 x i8> %input.trunc, <8 x i8*> %byte_ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8i8(<8 x i8> %input.trunc, <8 x ptr> %byte_ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VLDRH.16 Qd, [base, offs]
-define arm_aapcs_vfpcc void @unscaled_i16_i16(i8* %base, <8 x i16>* %offptr, <8 x i16> %input) {
+define arm_aapcs_vfpcc void @unscaled_i16_i16(ptr %base, ptr %offptr, <8 x i16> %input) {
 ; CHECK-LABEL: unscaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x i16*>
-  call void @llvm.masked.scatter.v8i16(<8 x i16> %input, <8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
+  call void @llvm.masked.scatter.v8i16(<8 x i16> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VLDRH.s16 Qd, [base, offs]
-define arm_aapcs_vfpcc void @unscaled_v8f16_i16(i8* %base, <8 x i16>* %offptr, <8 x half> %input) {
+define arm_aapcs_vfpcc void @unscaled_v8f16_i16(ptr %base, ptr %offptr, <8 x half> %input) {
 ; CHECK-LABEL: unscaled_v8f16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.zext = zext <8 x i16> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x half*>
-  call void @llvm.masked.scatter.v8f16(<8 x half> %input, <8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
+  call void @llvm.masked.scatter.v8f16(<8 x half> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand - sext offsets
-define arm_aapcs_vfpcc void @unscaled_v8i16_sext(i8* %base, <8 x i16>* %offptr, <8 x i16> %input) {
+define arm_aapcs_vfpcc void @unscaled_v8i16_sext(ptr %base, ptr %offptr, <8 x i16> %input) {
 ; CHECK-LABEL: unscaled_v8i16_sext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -97,16 +97,16 @@ define arm_aapcs_vfpcc void @unscaled_v8i16_sext(i8* %base, <8 x i16>* %offptr,
 ; CHECK-NEXT:    strh r0, [r5]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.sext = sext <8 x i16> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.sext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x i16*>
-  call void @llvm.masked.scatter.v8i16(<8 x i16> %input, <8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.sext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
+  call void @llvm.masked.scatter.v8i16(<8 x i16> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand - sext offsets
-define arm_aapcs_vfpcc void @unscaled_v8f16_sext(i8* %base, <8 x i16>* %offptr, <8 x half> %input) {
+define arm_aapcs_vfpcc void @unscaled_v8f16_sext(ptr %base, ptr %offptr, <8 x half> %input) {
 ; CHECK-LABEL: unscaled_v8f16_sext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q2, [r1]
@@ -131,16 +131,16 @@ define arm_aapcs_vfpcc void @unscaled_v8f16_sext(i8* %base, <8 x i16>* %offptr,
 ; CHECK-NEXT:    vstr.16 s0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i16>, <8 x i16>* %offptr, align 2
+  %offs = load <8 x i16>, ptr %offptr, align 2
   %offs.sext = sext <8 x i16> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.sext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x half*>
-  call void @llvm.masked.scatter.v8f16(<8 x half> %input, <8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.sext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
+  call void @llvm.masked.scatter.v8f16(<8 x half> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand - i32 offsets
-define arm_aapcs_vfpcc void @unscaled_v8i16_noext(i8* %base, <8 x i32>* %offptr, <8 x i16> %input) {
+define arm_aapcs_vfpcc void @unscaled_v8i16_noext(ptr %base, ptr %offptr, <8 x i16> %input) {
 ; CHECK-LABEL: unscaled_v8i16_noext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -171,15 +171,15 @@ define arm_aapcs_vfpcc void @unscaled_v8i16_noext(i8* %base, <8 x i32>* %offptr,
 ; CHECK-NEXT:    strh r0, [r5]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %offs = load <8 x i32>, <8 x i32>* %offptr, align 4
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x i16*>
-  call void @llvm.masked.scatter.v8i16(<8 x i16> %input, <8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <8 x i32>, ptr %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
+  call void @llvm.masked.scatter.v8i16(<8 x i16> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand - i32 offsets
-define arm_aapcs_vfpcc void @unscaled_v8f16_noext(i8* %base, <8 x i32>* %offptr, <8 x half> %input) {
+define arm_aapcs_vfpcc void @unscaled_v8f16_noext(ptr %base, ptr %offptr, <8 x half> %input) {
 ; CHECK-LABEL: unscaled_v8f16_noext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q2, [r1]
@@ -204,47 +204,47 @@ define arm_aapcs_vfpcc void @unscaled_v8f16_noext(i8* %base, <8 x i32>* %offptr,
 ; CHECK-NEXT:    vstr.16 s0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i32>, <8 x i32>* %offptr, align 4
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x half*>
-  call void @llvm.masked.scatter.v8f16(<8 x half> %input, <8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <8 x i32>, ptr %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
+  call void @llvm.masked.scatter.v8f16(<8 x half> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VLDRH.16 Qd, [base, zext(offs)]
-define arm_aapcs_vfpcc void @unsigned_unscaled_i16_i8(i8* %base, <8 x i8>* %offptr, <8 x i16> %input) {
+define arm_aapcs_vfpcc void @unsigned_unscaled_i16_i8(ptr %base, ptr %offptr, <8 x i16> %input) {
 ; CHECK-LABEL: unsigned_unscaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x i16*>
-  call void @llvm.masked.scatter.v8i16(<8 x i16> %input, <8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
+  call void @llvm.masked.scatter.v8i16(<8 x i16> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VLDRH.16 Qd, [base, zext(offs)]
-define arm_aapcs_vfpcc void @unsigned_unscaled_f16_i8(i8* %base, <8 x i8>* %offptr, <8 x half> %input) {
+define arm_aapcs_vfpcc void @unsigned_unscaled_f16_i8(ptr %base, ptr %offptr, <8 x half> %input) {
 ; CHECK-LABEL: unsigned_unscaled_f16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x half*>
-  call void @llvm.masked.scatter.v8f16(<8 x half> %input, <8 x half*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
+  call void @llvm.masked.scatter.v8f16(<8 x half> %input, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand - sext offsets
-define arm_aapcs_vfpcc void @trunc_signed_unscaled_i64_i8(i8* %base, <8 x i8>* %offptr, <8 x i64> %input) {
+define arm_aapcs_vfpcc void @trunc_signed_unscaled_i64_i8(ptr %base, ptr %offptr, <8 x i64> %input) {
 ; CHECK-LABEL: trunc_signed_unscaled_i64_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -278,16 +278,16 @@ define arm_aapcs_vfpcc void @trunc_signed_unscaled_i64_i8(i8* %base, <8 x i8>* %
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.sext = sext <8 x i8> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.sext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x i16*>
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.sext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
   %input.trunc = trunc <8 x i64> %input to <8 x i16>
-  call void @llvm.masked.scatter.v8i16(<8 x i16> %input.trunc, <8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8i16(<8 x i16> %input.trunc, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i64_i8(i8* %base, <8 x i8>* %offptr, <8 x i64> %input) {
+define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i64_i8(ptr %base, ptr %offptr, <8 x i64> %input) {
 ; CHECK-LABEL: trunc_unsigned_unscaled_i64_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -313,17 +313,17 @@ define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i64_i8(i8* %base, <8 x i8>*
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x i16*>
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
   %input.trunc = trunc <8 x i64> %input to <8 x i16>
-  call void @llvm.masked.scatter.v8i16(<8 x i16> %input.trunc, <8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8i16(<8 x i16> %input.trunc, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand - sext offsets
-define arm_aapcs_vfpcc void @trunc_signed_unscaled_i32_i8(i8* %base, <8 x i8>* %offptr, <8 x i32> %input) {
+define arm_aapcs_vfpcc void @trunc_signed_unscaled_i32_i8(ptr %base, ptr %offptr, <8 x i32> %input) {
 ; CHECK-LABEL: trunc_signed_unscaled_i32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -350,16 +350,16 @@ define arm_aapcs_vfpcc void @trunc_signed_unscaled_i32_i8(i8* %base, <8 x i8>* %
 ; CHECK-NEXT:    strh r4, [r5]
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.sext = sext <8 x i8> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.sext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x i16*>
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.sext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
   %input.trunc = trunc <8 x i32> %input to <8 x i16>
-  call void @llvm.masked.scatter.v8i16(<8 x i16> %input.trunc, <8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8i16(<8 x i16> %input.trunc, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i32_i8(i8* %base, <8 x i8>* %offptr, <8 x i32> %input) {
+define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i32_i8(ptr %base, ptr %offptr, <8 x i32> %input) {
 ; CHECK-LABEL: trunc_unsigned_unscaled_i32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #16
@@ -373,17 +373,17 @@ define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i32_i8(i8* %base, <8 x i8>*
 ; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  %ptrs = bitcast <8 x i8*> %byte_ptrs to <8 x i16*>
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  %ptrs = bitcast <8 x ptr> %byte_ptrs to <8 x ptr>
   %input.trunc = trunc <8 x i32> %input to <8 x i16>
-  call void @llvm.masked.scatter.v8i16(<8 x i16> %input.trunc, <8 x i16*> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8i16(<8 x i16> %input.trunc, <8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand - sext offsets
-define arm_aapcs_vfpcc void @trunc_signed_unscaled_i16_i8(i8* %base, <8 x i8>* %offptr, <8 x i16> %input) {
+define arm_aapcs_vfpcc void @trunc_signed_unscaled_i16_i8(ptr %base, ptr %offptr, <8 x i16> %input) {
 ; CHECK-LABEL: trunc_signed_unscaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -414,14 +414,14 @@ define arm_aapcs_vfpcc void @trunc_signed_unscaled_i16_i8(i8* %base, <8 x i8>* %
 ; CHECK-NEXT:    strb r0, [r5]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.sext = sext <8 x i8> %offs to <8 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.sext
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.sext
   %input.trunc = trunc <8 x i16> %input to <8 x i8>
-  call void @llvm.masked.scatter.v8i8(<8 x i8> %input.trunc, <8 x i8*> %byte_ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8i8(<8 x i8> %input.trunc, <8 x ptr> %byte_ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-declare void @llvm.masked.scatter.v8i8(<8 x i8>, <8 x i8*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v8i16(<8 x i16>, <8 x i16*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v8f16(<8 x half>, <8 x half*>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v8i8(<8 x i8>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v8i16(<8 x i16>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v8f16(<8 x half>, <8 x ptr>, i32, <8 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-scatter-ind32-scaled.ll b/llvm/test/CodeGen/Thumb2/mve-scatter-ind32-scaled.ll
index 760dff52bff47..39bf624730d84 100644
--- a/llvm/test/CodeGen/Thumb2/mve-scatter-ind32-scaled.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-scatter-ind32-scaled.ll
@@ -2,238 +2,238 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - -opaque-pointers | FileCheck %s
 
 ; VLDRH.u32 Qd, [base, offs, #uxtw #1]
-define arm_aapcs_vfpcc void @ext_scaled_i16_i32(i16* %base, <4 x i32>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_scaled_i16_i32(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_scaled_i16_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs
   %t = trunc <4 x i32> %input to <4 x i16>
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %t, <4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %t, <4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs, uxtw #2]
-define arm_aapcs_vfpcc void @scaled_i32_i32(i32* %base, <4 x i32>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @scaled_i32_i32(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: scaled_i32_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs, uxtw #2]
-define arm_aapcs_vfpcc void @scaled_f32_i32(i32* %base, <4 x i32>* %offptr, <4 x float> %input) {
+define arm_aapcs_vfpcc void @scaled_f32_i32(ptr %base, ptr %offptr, <4 x float> %input) {
 ; CHECK-LABEL: scaled_f32_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs
-  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
-  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %input, <4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %i32_ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x ptr> %i32_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.zext, uxtw #2]
-define arm_aapcs_vfpcc void @unsigned_scaled_b_i32_i16(i32* %base, <4 x i16>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @unsigned_scaled_b_i32_i16(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: unsigned_scaled_b_i32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.zext
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.sext, uxtw #2]
-define arm_aapcs_vfpcc void @signed_scaled_i32_i16(i32* %base, <4 x i16>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @signed_scaled_i32_i16(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: signed_scaled_i32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.sext
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.sext
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.zext, uxtw #2]
-define arm_aapcs_vfpcc void @a_unsigned_scaled_f32_i16(i32* %base, <4 x i16>* %offptr, <4 x float> %input) {
+define arm_aapcs_vfpcc void @a_unsigned_scaled_f32_i16(ptr %base, ptr %offptr, <4 x float> %input) {
 ; CHECK-LABEL: a_unsigned_scaled_f32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
-  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %input, <4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %i32_ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %i32_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.sext, uxtw #2]
-define arm_aapcs_vfpcc void @b_signed_scaled_f32_i16(i32* %base, <4 x i16>* %offptr, <4 x float> %input) {
+define arm_aapcs_vfpcc void @b_signed_scaled_f32_i16(ptr %base, ptr %offptr, <4 x float> %input) {
 ; CHECK-LABEL: b_signed_scaled_f32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
-  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %input, <4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %i32_ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %i32_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VLDRH.u32 Qd, [base, offs.sext, uxtw #1]
-define arm_aapcs_vfpcc void @ext_signed_scaled_i16_i16(i16* %base, <4 x i16>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_signed_scaled_i16_i16(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_signed_scaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.sext
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs.sext
   %t = trunc <4 x i32> %input to <4 x i16>
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %t, <4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %t, <4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRH.32 Qd, [base, offs.sext, uxtw #1]
-define arm_aapcs_vfpcc void @ext_unsigned_scaled_i16_i16(i16* %base, <4 x i16>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_unsigned_scaled_i16_i16(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_unsigned_scaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.zext
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs.zext
   %t = trunc <4 x i32> %input to <4 x i16>
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %t, <4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %t, <4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.zext, uxtw #2]
-define arm_aapcs_vfpcc void @unsigned_scaled_b_i32_i8(i32* %base, <4 x i8>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @unsigned_scaled_b_i32_i8(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: unsigned_scaled_b_i32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.zext
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.sext, uxtw #2]
-define arm_aapcs_vfpcc void @signed_scaled_i32_i8(i32* %base, <4 x i8>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @signed_scaled_i32_i8(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: signed_scaled_i32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.sext
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.sext
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.zext, uxtw #2]
-define arm_aapcs_vfpcc void @a_unsigned_scaled_f32_i8(i32* %base, <4 x i8>* %offptr, <4 x float> %input) {
+define arm_aapcs_vfpcc void @a_unsigned_scaled_f32_i8(ptr %base, ptr %offptr, <4 x float> %input) {
 ; CHECK-LABEL: a_unsigned_scaled_f32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
-  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %input, <4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %i32_ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %i32_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.sext, uxtw #2]
-define arm_aapcs_vfpcc void @b_signed_scaled_f32_i8(i32* %base, <4 x i8>* %offptr, <4 x float> %input) {
+define arm_aapcs_vfpcc void @b_signed_scaled_f32_i8(ptr %base, ptr %offptr, <4 x float> %input) {
 ; CHECK-LABEL: b_signed_scaled_f32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %i32_ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
-  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %input, <4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %i32_ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %i32_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VLDRH.z32 Qd, [base, offs.sext, uxtw #1]
-define arm_aapcs_vfpcc void @ext_signed_scaled_i16_i8(i16* %base, <4 x i8>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_signed_scaled_i16_i8(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_signed_scaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.sext
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs.sext
   %t = trunc <4 x i32> %input to <4 x i16>
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %t, <4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %t, <4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VLDRH.z32 Qd, [base, offs.zext, uxtw #1]
-define arm_aapcs_vfpcc void @ext_unsigned_scaled_i16_i8(i16* %base, <4 x i8>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_unsigned_scaled_i16_i8(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_unsigned_scaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, q1, uxtw #1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs.zext
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs.zext
   %t = trunc <4 x i32> %input to <4 x i16>
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %t, <4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %t, <4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @ext_scaled_i16_i32_2gep(i16* %base, <4 x i32>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_scaled_i16_i32_2gep(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_scaled_i16_i32_2gep:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
@@ -245,15 +245,15 @@ define arm_aapcs_vfpcc void @ext_scaled_i16_i32_2gep(i16* %base, <4 x i32>* %off
 ; CHECK-NEXT:    vstrh.32 q0, [r3, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs
-  %ptrs2 = getelementptr inbounds i16, <4 x i16*> %ptrs, i16 5
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs
+  %ptrs2 = getelementptr inbounds i16, <4 x ptr> %ptrs, i16 5
   %t = trunc <4 x i32> %input to <4 x i16>
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %t, <4 x i16*> %ptrs2, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %t, <4 x ptr> %ptrs2, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @ext_scaled_i16_i32_2gep2(i16* %base, <4 x i32>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_scaled_i16_i32_2gep2(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_scaled_i16_i32_2gep2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adr r1, .LCPI16_0
@@ -268,15 +268,15 @@ define arm_aapcs_vfpcc void @ext_scaled_i16_i32_2gep2(i16* %base, <4 x i32>* %of
 ; CHECK-NEXT:    .long 22 @ 0x16
 ; CHECK-NEXT:    .long 28 @ 0x1c
 entry:
-  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i16> <i16 0, i16 3, i16 6, i16 9>
-  %ptrs2 = getelementptr inbounds i16, <4 x i16*> %ptrs, i16 5
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i16> <i16 0, i16 3, i16 6, i16 9>
+  %ptrs2 = getelementptr inbounds i16, <4 x ptr> %ptrs, i16 5
   %t = trunc <4 x i32> %input to <4 x i16>
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %t, <4 x i16*> %ptrs2, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %t, <4 x ptr> %ptrs2, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-declare void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8>, <4 x i8*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16>, <4 x i16*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half>, <4 x half*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float>, <4 x float*>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i8.v4p0(<4 x i8>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4f16.v4p0(<4 x half>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4f32.v4p0(<4 x float>, <4 x ptr>, i32, <4 x i1>)
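
The pattern above repeats mechanically through the remaining scatter files: typed pointer types such as i32* and <4 x float*> collapse to ptr / <4 x ptr>, and the masked-scatter intrinsic mangling drops the pointee suffix, keeping only the address space. A minimal before/after sketch, reduced from the declarations above (illustrative only, not an extra hunk of the patch):

  ; typed pointers: the pointee appears in both the operand type and the mangled name
  declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)

  ; opaque pointers: operands become <4 x ptr>; the mangling keeps only the address space, v4p0
  declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32, <4 x i1>)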

diff  --git a/llvm/test/CodeGen/Thumb2/mve-scatter-ind32-unscaled.ll b/llvm/test/CodeGen/Thumb2/mve-scatter-ind32-unscaled.ll
index eb56a2ff688fb..cf49c8e0486ce 100644
--- a/llvm/test/CodeGen/Thumb2/mve-scatter-ind32-unscaled.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-scatter-ind32-unscaled.ll
@@ -2,293 +2,293 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - -opaque-pointers | FileCheck %s
 
 ; VSTRB.32 Qd, [base, offs]
-define arm_aapcs_vfpcc void @ext_unscaled_i8_i32(i8* %base, <4 x i32>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_unscaled_i8_i32(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_unscaled_i8_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
   %t = trunc <4 x i32> %input to <4 x i8>
-  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %t, <4 x i8*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %t, <4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRH.32 Qd, [base, offs]
-define arm_aapcs_vfpcc void @ext_unscaled_i16_i32(i8* %base, <4 x i32>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_unscaled_i16_i32(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_unscaled_i16_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
   %t = trunc <4 x i32> %input to <4 x i16>
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %t, <4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %t, <4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs]
-define arm_aapcs_vfpcc void @unscaled_i32_i32(i8* %base, <4 x i32>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @unscaled_i32_i32(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: unscaled_i32_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs]
-define arm_aapcs_vfpcc void @unscaled_f32_i32(i8* %base, <4 x i32>* %offptr, <4 x float> %input) {
+define arm_aapcs_vfpcc void @unscaled_f32_i32(ptr %base, ptr %offptr, <4 x float> %input) {
 ; CHECK-LABEL: unscaled_f32_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32>, <4 x i32>* %offptr, align 4
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
-  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %input, <4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.zext]
-define arm_aapcs_vfpcc void @unsigned_unscaled_b_i32_i16(i8* %base, <4 x i16>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @unsigned_unscaled_b_i32_i16(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: unsigned_unscaled_b_i32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.sext]
-define arm_aapcs_vfpcc void @signed_unscaled_i32_i16(i8* %base, <4 x i16>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @signed_unscaled_i32_i16(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: signed_unscaled_i32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.zext]
-define arm_aapcs_vfpcc void @a_unsigned_unscaled_f32_i16(i8* %base, <4 x i16>* %offptr, <4 x float> %input) {
+define arm_aapcs_vfpcc void @a_unsigned_unscaled_f32_i16(ptr %base, ptr %offptr, <4 x float> %input) {
 ; CHECK-LABEL: a_unsigned_unscaled_f32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
-  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %input, <4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.sext]
-define arm_aapcs_vfpcc void @b_signed_unscaled_f32_i16(i8* %base, <4 x i16>* %offptr, <4 x float> %input) {
+define arm_aapcs_vfpcc void @b_signed_unscaled_f32_i16(ptr %base, ptr %offptr, <4 x float> %input) {
 ; CHECK-LABEL: b_signed_unscaled_f32_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
-  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %input, <4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRH.32 Qd, [base, offs.sext]
-define arm_aapcs_vfpcc void @ext_signed_unscaled_i16_i16(i8* %base, <4 x i16>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_signed_unscaled_i16_i16(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_signed_unscaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
   %t = trunc <4 x i32> %input to <4 x i16>
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %t, <4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %t, <4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRH.32 Qd, [base, offs.zext]
-define arm_aapcs_vfpcc void @ext_unsigned_unscaled_i16_i16(i8* %base, <4 x i16>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_unsigned_unscaled_i16_i16(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_unsigned_unscaled_i16_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
   %t = trunc <4 x i32> %input to <4 x i16>
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %t, <4 x i16*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %t, <4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRB.32 Qd, [base, offs.sext]
-define arm_aapcs_vfpcc void @ext_signed_unscaled_i8_i16(i8* %base, <4 x i16>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_signed_unscaled_i8_i16(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_signed_unscaled_i8_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.sext = sext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
   %t = trunc <4 x i32> %input to <4 x i8>
-  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %t, <4 x i8*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %t, <4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRB.32 Qd, [base, offs.zext]
-define arm_aapcs_vfpcc void @ext_unsigned_unscaled_i8_i16(i8* %base, <4 x i16>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_unsigned_unscaled_i8_i16(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_unsigned_unscaled_i8_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q1, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16>, <4 x i16>* %offptr, align 2
+  %offs = load <4 x i16>, ptr %offptr, align 2
   %offs.zext = zext <4 x i16> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
   %t = trunc <4 x i32> %input to <4 x i8>
-  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %t, <4 x i8*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %t, <4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.zext]
-define arm_aapcs_vfpcc void @unsigned_unscaled_b_i32_i8(i8* %base, <4 x i8>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @unsigned_unscaled_b_i32_i8(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: unsigned_unscaled_b_i32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.sext]
-define arm_aapcs_vfpcc void @signed_unscaled_i32_i8(i8* %base, <4 x i8>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @signed_unscaled_i32_i8(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: signed_unscaled_i32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.zext]
-define arm_aapcs_vfpcc void @a_unsigned_unscaled_f32_i8(i8* %base, <4 x i8>* %offptr, <4 x float> %input) {
+define arm_aapcs_vfpcc void @a_unsigned_unscaled_f32_i8(ptr %base, ptr %offptr, <4 x float> %input) {
 ; CHECK-LABEL: a_unsigned_unscaled_f32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
-  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %input, <4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [base, offs.sext]
-define arm_aapcs_vfpcc void @b_signed_unscaled_f32_i8(i8* %base, <4 x i8>* %offptr, <4 x float> %input) {
+define arm_aapcs_vfpcc void @b_signed_unscaled_f32_i8(ptr %base, ptr %offptr, <4 x float> %input) {
 ; CHECK-LABEL: b_signed_unscaled_f32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
-  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %input, <4 x float*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %input, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRB.32 Qd, [base, offs.sext]
-define arm_aapcs_vfpcc void @ext_signed_unscaled_i8_i8(i8* %base, <4 x i8>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_signed_unscaled_i8_i8(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_signed_unscaled_i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
   %t = trunc <4 x i32> %input to <4 x i8>
-  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %t, <4 x i8*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %t, <4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; VSTRB.32 Qd, [base, offs.zext]
-define arm_aapcs_vfpcc void @ext_unsigned_unscaled_i8_i8(i8* %base, <4 x i8>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @ext_unsigned_unscaled_i8_i8(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: ext_unsigned_unscaled_i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
   %t = trunc <4 x i32> %input to <4 x i8>
-  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %t, <4 x i8*> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %t, <4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @trunc_signed_unscaled_i64_i8(i8* %base, <4 x i8>* %offptr, <4 x i64> %input) {
+define arm_aapcs_vfpcc void @trunc_signed_unscaled_i64_i8(ptr %base, ptr %offptr, <4 x i64> %input) {
 ; CHECK-LABEL: trunc_signed_unscaled_i64_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q2, [r1]
@@ -298,16 +298,16 @@ define arm_aapcs_vfpcc void @trunc_signed_unscaled_i64_i8(i8* %base, <4 x i8>* %
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
   %input.trunc = trunc <4 x i64> %input to <4 x i32>
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input.trunc, <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %input.trunc, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i64_i8(i8* %base, <4 x i8>* %offptr, <4 x i64> %input) {
+define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i64_i8(ptr %base, ptr %offptr, <4 x i64> %input) {
 ; CHECK-LABEL: trunc_unsigned_unscaled_i64_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q2, [r1]
@@ -317,48 +317,48 @@ define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i64_i8(i8* %base, <4 x i8>*
 ; CHECK-NEXT:    vstrw.32 q0, [r0, q2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
   %input.trunc = trunc <4 x i64> %input to <4 x i32>
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input.trunc, <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %input.trunc, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @trunc_signed_unscaled_i32_i8(i8* %base, <4 x i8>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @trunc_signed_unscaled_i32_i8(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: trunc_signed_unscaled_i32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q1, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
   %input.trunc = trunc <4 x i32> %input to <4 x i16>
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %input.trunc, <4 x i16*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %input.trunc, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i32_i8(i8* %base, <4 x i8>* %offptr, <4 x i32> %input) {
+define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i32_i8(ptr %base, ptr %offptr, <4 x i32> %input) {
 ; CHECK-LABEL: trunc_unsigned_unscaled_i32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
-  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>
   %input.trunc = trunc <4 x i32> %input to <4 x i16>
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %input.trunc, <4 x i16*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %input.trunc, <4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @trunc_signed_unscaled_i16_i8(i8* %base, <4 x i8>* %offptr, <4 x i16> %input) {
+define arm_aapcs_vfpcc void @trunc_signed_unscaled_i16_i8(ptr %base, ptr %offptr, <4 x i16> %input) {
 ; CHECK-LABEL: trunc_signed_unscaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.i32 q1, #0xff
@@ -367,15 +367,15 @@ define arm_aapcs_vfpcc void @trunc_signed_unscaled_i16_i8(i8* %base, <4 x i8>* %
 ; CHECK-NEXT:    vstrb.32 q0, [r0, q2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.sext = sext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.sext
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.sext
   %input.trunc = trunc <4 x i16> %input to <4 x i8>
-  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %input.trunc, <4 x i8*> %byte_ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %input.trunc, <4 x ptr> %byte_ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i16_i8(i8* %base, <4 x i8>* %offptr, <4 x i16> %input) {
+define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i16_i8(ptr %base, ptr %offptr, <4 x i16> %input) {
 ; CHECK-LABEL: trunc_unsigned_unscaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.i32 q1, #0xff
@@ -384,16 +384,16 @@ define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i16_i8(i8* %base, <4 x i8>*
 ; CHECK-NEXT:    vstrb.32 q0, [r0, q2]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8>, <4 x i8>* %offptr, align 1
+  %offs = load <4 x i8>, ptr %offptr, align 1
   %offs.zext = zext <4 x i8> %offs to <4 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs.zext
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
   %input.trunc = trunc <4 x i16> %input to <4 x i8>
-  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %input.trunc, <4 x i8*> %byte_ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %input.trunc, <4 x ptr> %byte_ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-declare void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8>, <4 x i8*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16>, <4 x i16*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half>, <4 x half*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float>, <4 x float*>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i8.v4p0(<4 x i8>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4f16.v4p0(<4 x half>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4f32.v4p0(<4 x float>, <4 x ptr>, i32, <4 x i1>)
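
One side effect visible throughout the unscaled file above: bitcasts between pointer vectors of different pointee types become identity casts once both sides are <4 x ptr>, and the conversion keeps them so the IR stays structurally parallel to the old tests. The resulting no-op form, as it appears in the hunks above:

  %byte_ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
  ; with opaque pointers this bitcast is a no-op: source and destination types are identical
  %ptrs = bitcast <4 x ptr> %byte_ptrs to <4 x ptr>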

diff  --git a/llvm/test/CodeGen/Thumb2/mve-scatter-ind8-unscaled.ll b/llvm/test/CodeGen/Thumb2/mve-scatter-ind8-unscaled.ll
index 0034b2dbd6bf7..7a5c1b48ce27d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-scatter-ind8-unscaled.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-scatter-ind8-unscaled.ll
@@ -2,21 +2,21 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - -opaque-pointers | FileCheck %s
 
 ; VLDRB.8
-define arm_aapcs_vfpcc void @unscaled_v16i8_i8(i8* %base, <16 x i8>* %offptr, <16 x i8> %input) {
+define arm_aapcs_vfpcc void @unscaled_v16i8_i8(ptr %base, ptr %offptr, <16 x i8> %input) {
 ; CHECK-LABEL: unscaled_v16i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r1]
 ; CHECK-NEXT:    vstrb.8 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <16 x i8>, <16 x i8>* %offptr, align 1
+  %offs = load <16 x i8>, ptr %offptr, align 1
   %offs.zext = zext <16 x i8> %offs to <16 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs.zext
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %input, <16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> %offs.zext
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %input, <16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @unscaled_v8i8_i8(i8* %base, <8 x i8>* %offptr, <8 x i8> %input) {
+define arm_aapcs_vfpcc void @unscaled_v8i8_i8(ptr %base, ptr %offptr, <8 x i8> %input) {
 ; CHECK-LABEL: unscaled_v8i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r1]
@@ -24,15 +24,15 @@ define arm_aapcs_vfpcc void @unscaled_v8i8_i8(i8* %base, <8 x i8>* %offptr, <8 x
 ; CHECK-NEXT:    vstrb.16 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x i8>, <8 x i8>* %offptr, align 1
+  %offs = load <8 x i8>, ptr %offptr, align 1
   %offs.zext = zext <8 x i8> %offs to <8 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i32> %offs.zext
-  call void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8> %input, <8 x i8*> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i8, ptr %base, <8 x i32> %offs.zext
+  call void @llvm.masked.scatter.v8i8.v8p0(<8 x i8> %input, <8 x ptr> %ptrs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand
-define arm_aapcs_vfpcc void @unscaled_v2i8_i8(i8* %base, <2 x i8>* %offptr, <2 x i8> %input) {
+define arm_aapcs_vfpcc void @unscaled_v2i8_i8(ptr %base, ptr %offptr, <2 x i8> %input) {
 ; CHECK-LABEL: unscaled_v2i8_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrb r2, [r1]
@@ -48,15 +48,15 @@ define arm_aapcs_vfpcc void @unscaled_v2i8_i8(i8* %base, <2 x i8>* %offptr, <2 x
 ; CHECK-NEXT:    strb r2, [r0, r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <2 x i8>, <2 x i8>* %offptr, align 1
+  %offs = load <2 x i8>, ptr %offptr, align 1
   %offs.zext = zext <2 x i8> %offs to <2 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <2 x i32> %offs.zext
-  call void @llvm.masked.scatter.v2i8.v2p0i8(<2 x i8> %input, <2 x i8*> %ptrs, i32 1, <2 x i1> <i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i8, ptr %base, <2 x i32> %offs.zext
+  call void @llvm.masked.scatter.v2i8.v2p0(<2 x i8> %input, <2 x ptr> %ptrs, i32 1, <2 x i1> <i1 true, i1 true>)
   ret void
 }
 
 ; Expand - sext offsets
-define arm_aapcs_vfpcc void @unscaled_v16i8_sext(i8* %base, <16 x i8>* %offptr, <16 x i8> %input) {
+define arm_aapcs_vfpcc void @unscaled_v16i8_sext(ptr %base, ptr %offptr, <16 x i8> %input) {
 ; CHECK-LABEL: unscaled_v16i8_sext:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
@@ -111,15 +111,15 @@ define arm_aapcs_vfpcc void @unscaled_v16i8_sext(i8* %base, <16 x i8>* %offptr,
 ; CHECK-NEXT:    strb r0, [r7]
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
 entry:
-  %offs = load <16 x i8>, <16 x i8>* %offptr, align 1
+  %offs = load <16 x i8>, ptr %offptr, align 1
   %offs.sext = sext <16 x i8> %offs to <16 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs.sext
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %input, <16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> %offs.sext
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %input, <16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand - sext offsets
-define arm_aapcs_vfpcc void @unscaled_v16i8_i16(i8* %base, <16 x i16>* %offptr, <16 x i8> %input) {
+define arm_aapcs_vfpcc void @unscaled_v16i8_i16(ptr %base, ptr %offptr, <16 x i8> %input) {
 ; CHECK-LABEL: unscaled_v16i8_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
@@ -174,15 +174,15 @@ define arm_aapcs_vfpcc void @unscaled_v16i8_i16(i8* %base, <16 x i16>* %offptr,
 ; CHECK-NEXT:    strb r0, [r7]
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
 entry:
-  %offs = load <16 x i16>, <16 x i16>* %offptr, align 2
+  %offs = load <16 x i16>, ptr %offptr, align 2
   %offs.sext = sext <16 x i16> %offs to <16 x i32>
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs.sext
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %input, <16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> %offs.sext
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %input, <16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Could be manually scaled offsets
-define arm_aapcs_vfpcc void @unscaled_v16i8_scaled(i32* %base, <16 x i8>* %offptr, <16 x i8> %input) {
+define arm_aapcs_vfpcc void @unscaled_v16i8_scaled(ptr %base, ptr %offptr, <16 x i8> %input) {
 ; CHECK-LABEL: unscaled_v16i8_scaled:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
@@ -241,16 +241,16 @@ define arm_aapcs_vfpcc void @unscaled_v16i8_scaled(i32* %base, <16 x i8>* %offpt
 ; CHECK-NEXT:    strb r0, [r1]
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
 entry:
-  %offs = load <16 x i8>, <16 x i8>* %offptr, align 4
+  %offs = load <16 x i8>, ptr %offptr, align 4
   %offs.zext = zext <16 x i8> %offs to <16 x i32>
-  %ptrs32 = getelementptr inbounds i32, i32* %base, <16 x i32> %offs.zext
-  %ptrs = bitcast <16 x i32*> %ptrs32 to <16 x i8*>
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %input, <16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %ptrs32 = getelementptr inbounds i32, ptr %base, <16 x i32> %offs.zext
+  %ptrs = bitcast <16 x ptr> %ptrs32 to <16 x ptr>
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %input, <16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand - large offsets
-define arm_aapcs_vfpcc void @unscaled_v16i8_i8_next(i8* %base, <16 x i32>* %offptr, <16 x i8> %input) {
+define arm_aapcs_vfpcc void @unscaled_v16i8_i8_next(ptr %base, ptr %offptr, <16 x i8> %input) {
 ; CHECK-LABEL: unscaled_v16i8_i8_next:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
@@ -305,13 +305,13 @@ define arm_aapcs_vfpcc void @unscaled_v16i8_i8_next(i8* %base, <16 x i32>* %offp
 ; CHECK-NEXT:    strb r0, [r7]
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
 entry:
-  %offs = load <16 x i32>, <16 x i32>* %offptr, align 4
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %input, <16 x i8*> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <16 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> %offs
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %input, <16 x ptr> %ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i64_i8(i8* %base, <16 x i8>* %offptr, <16 x i64> %input) {
+define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i64_i8(ptr %base, ptr %offptr, <16 x i64> %input) {
 ; CHECK-LABEL: trunc_unsigned_unscaled_i64_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -363,15 +363,15 @@ define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i64_i8(i8* %base, <16 x i8>
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
-  %offs = load <16 x i8>, <16 x i8>* %offptr, align 1
+  %offs = load <16 x i8>, ptr %offptr, align 1
   %offs.zext = zext <16 x i8> %offs to <16 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs.zext
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> %offs.zext
   %input.trunc = trunc <16 x i64> %input to <16 x i8>
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %input.trunc, <16 x i8*> %byte_ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %input.trunc, <16 x ptr> %byte_ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i32_i8(i8* %base, <16 x i8>* %offptr, <16 x i32> %input) {
+define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i32_i8(ptr %base, ptr %offptr, <16 x i32> %input) {
 ; CHECK-LABEL: trunc_unsigned_unscaled_i32_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #16
@@ -387,15 +387,15 @@ define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i32_i8(i8* %base, <16 x i8>
 ; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <16 x i8>, <16 x i8>* %offptr, align 1
+  %offs = load <16 x i8>, ptr %offptr, align 1
   %offs.zext = zext <16 x i8> %offs to <16 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs.zext
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> %offs.zext
   %input.trunc = trunc <16 x i32> %input to <16 x i8>
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %input.trunc, <16 x i8*> %byte_ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %input.trunc, <16 x ptr> %byte_ptrs, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i16_i8(i8* %base, <16 x i8>* %offptr, <16 x i16> %input) {
+define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i16_i8(ptr %base, ptr %offptr, <16 x i16> %input) {
 ; CHECK-LABEL: trunc_unsigned_unscaled_i16_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #16
@@ -409,15 +409,15 @@ define arm_aapcs_vfpcc void @trunc_unsigned_unscaled_i16_i8(i8* %base, <16 x i8>
 ; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <16 x i8>, <16 x i8>* %offptr, align 1
+  %offs = load <16 x i8>, ptr %offptr, align 1
   %offs.zext = zext <16 x i8> %offs to <16 x i32>
-  %byte_ptrs = getelementptr inbounds i8, i8* %base, <16 x i32> %offs.zext
+  %byte_ptrs = getelementptr inbounds i8, ptr %base, <16 x i32> %offs.zext
   %input.trunc = trunc <16 x i16> %input to <16 x i8>
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %input.trunc, <16 x i8*> %byte_ptrs, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %input.trunc, <16 x ptr> %byte_ptrs, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @unscaled_v16i8_i8_2gep(i8* %base, <16 x i8>* %offptr, <16 x i8> %input) {
+define arm_aapcs_vfpcc void @unscaled_v16i8_i8_2gep(ptr %base, ptr %offptr, <16 x i8> %input) {
 ; CHECK-LABEL: unscaled_v16i8_i8_2gep:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
@@ -477,14 +477,14 @@ define arm_aapcs_vfpcc void @unscaled_v16i8_i8_2gep(i8* %base, <16 x i8>* %offpt
 ; CHECK-NEXT:    strb r0, [r5]
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 entry:
-  %offs = load <16 x i8>, <16 x i8>* %offptr, align 1
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i8> %offs
-  %ptrs2 = getelementptr inbounds i8, <16 x i8*> %ptrs, i8 5
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %input, <16 x i8*> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <16 x i8>, ptr %offptr, align 1
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i8> %offs
+  %ptrs2 = getelementptr inbounds i8, <16 x ptr> %ptrs, i8 5
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %input, <16 x ptr> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @unscaled_v16i8_i8_2gep2(i8* %base, <16 x i8>* %offptr, <16 x i8> %input) {
+define arm_aapcs_vfpcc void @unscaled_v16i8_i8_2gep2(ptr %base, ptr %offptr, <16 x i8> %input) {
 ; CHECK-LABEL: unscaled_v16i8_i8_2gep2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    adr r1, .LCPI11_0
@@ -511,13 +511,13 @@ define arm_aapcs_vfpcc void @unscaled_v16i8_i8_2gep2(i8* %base, <16 x i8>* %offp
 ; CHECK-NEXT:    .byte 47 @ 0x2f
 ; CHECK-NEXT:    .byte 50 @ 0x32
 entry:
-  %ptrs = getelementptr inbounds i8, i8* %base, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
-  %ptrs2 = getelementptr inbounds i8, <16 x i8*> %ptrs, i8 5
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %input, <16 x i8*> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %ptrs = getelementptr inbounds i8, ptr %base, <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>
+  %ptrs2 = getelementptr inbounds i8, <16 x ptr> %ptrs, i8 5
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %input, <16 x ptr> %ptrs2, i32 1, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 
-declare void @llvm.masked.scatter.v2i8.v2p0i8(<2 x i8>, <2 x i8*>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8>, <8 x i8*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8>, <16 x i8*>, i32, <16 x i1>)
+declare void @llvm.masked.scatter.v2i8.v2p0(<2 x i8>, <2 x ptr>, i32, <2 x i1>)
+declare void @llvm.masked.scatter.v8i8.v8p0(<8 x i8>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v16i8.v16p0(<16 x i8>, <16 x ptr>, i32, <16 x i1>)
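
The mve-scatter-ptrs.ll diff below covers the remaining addressing mode, scatters through a loaded vector of pointers rather than base-plus-offsets; there the loaded value type changes as well as the pointer operand. A reduced sketch of that pattern, taken from the ptr_v2i32 test below (%v is the function's vector argument):

  ; before: %offs = load <2 x i32*>, <2 x i32*>* %offptr, align 4
  %offs = load <2 x ptr>, ptr %offptr, align 4
  call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> %v, <2 x ptr> %offs, i32 4, <2 x i1> <i1 true, i1 true>)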

diff  --git a/llvm/test/CodeGen/Thumb2/mve-scatter-ptrs.ll b/llvm/test/CodeGen/Thumb2/mve-scatter-ptrs.ll
index 01f1676515334..38cccc60f04e0 100644
--- a/llvm/test/CodeGen/Thumb2/mve-scatter-ptrs.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-scatter-ptrs.ll
@@ -5,7 +5,7 @@
 ; i32
 
 ; Expand
-define arm_aapcs_vfpcc void @ptr_v2i32(<2 x i32> %v, <2 x i32*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v2i32(<2 x i32> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v2i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov r2, s0
@@ -15,26 +15,26 @@ define arm_aapcs_vfpcc void @ptr_v2i32(<2 x i32> %v, <2 x i32*>* %offptr) {
 ; CHECK-NEXT:    str r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <2 x i32*>, <2 x i32*>* %offptr, align 4
-  call void @llvm.masked.scatter.v2i32.v2p0i32(<2 x i32> %v, <2 x i32*> %offs, i32 4, <2 x i1> <i1 true, i1 true>)
+  %offs = load <2 x ptr>, ptr %offptr, align 4
+  call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> %v, <2 x ptr> %offs, i32 4, <2 x i1> <i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [offs, 0]
-define arm_aapcs_vfpcc void @ptr_v4i32(<4 x i32> %v, <4 x i32*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v4i32(<4 x i32> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
 ; CHECK-NEXT:    vstrw.32 q0, [q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i32*>, <4 x i32*>* %offptr, align 4
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %v, <4 x i32*> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <4 x ptr>, ptr %offptr, align 4
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %v, <4 x ptr> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand
-define arm_aapcs_vfpcc void @ptr_v8i32(<8 x i32> %v, <8 x i32*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v8i32(<8 x i32> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v8i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -59,13 +59,13 @@ define arm_aapcs_vfpcc void @ptr_v8i32(<8 x i32> %v, <8 x i32*>* %offptr) {
 ; CHECK-NEXT:    str r6, [r4]
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <8 x i32*>, <8 x i32*>* %offptr, align 4
-  call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> %v, <8 x i32*> %offs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %v, <8 x ptr> %offs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand
-define arm_aapcs_vfpcc void @ptr_v16i32(<16 x i32> %v, <16 x i32*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v16i32(<16 x i32> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v16i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -114,15 +114,15 @@ define arm_aapcs_vfpcc void @ptr_v16i32(<16 x i32> %v, <16 x i32*>* %offptr) {
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <16 x i32*>, <16 x i32*>* %offptr, align 4
-  call void @llvm.masked.scatter.v16i32.v16p0i32(<16 x i32> %v, <16 x i32*> %offs, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <16 x ptr>, ptr %offptr, align 4
+  call void @llvm.masked.scatter.v16i32.v16p0(<16 x i32> %v, <16 x ptr> %offs, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; f32
 
 ; Expand
-define arm_aapcs_vfpcc void @ptr_v2f32(<2 x float> %v, <2 x float*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v2f32(<2 x float> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v2f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrd r1, r0, [r0]
@@ -130,26 +130,26 @@ define arm_aapcs_vfpcc void @ptr_v2f32(<2 x float> %v, <2 x float*>* %offptr) {
 ; CHECK-NEXT:    vstr s1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <2 x float*>, <2 x float*>* %offptr, align 4
-  call void @llvm.masked.scatter.v2f32.v2p0f32(<2 x float> %v, <2 x float*> %offs, i32 4, <2 x i1> <i1 true, i1 true>)
+  %offs = load <2 x ptr>, ptr %offptr, align 4
+  call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> %v, <2 x ptr> %offs, i32 4, <2 x i1> <i1 true, i1 true>)
   ret void
 }
 
 ; VSTRW.32 Qd, [offs, 0]
-define arm_aapcs_vfpcc void @ptr_v4f32(<4 x float> %v, <4 x float*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v4f32(<4 x float> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
 ; CHECK-NEXT:    vstrw.32 q0, [q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x float*>, <4 x float*>* %offptr, align 4
-  call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %v, <4 x float*> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <4 x ptr>, ptr %offptr, align 4
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %v, <4 x ptr> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand
-define arm_aapcs_vfpcc void @ptr_v8f32(<8 x float> %v, <8 x float*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v8f32(<8 x float> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v8f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -170,15 +170,15 @@ define arm_aapcs_vfpcc void @ptr_v8f32(<8 x float> %v, <8 x float*>* %offptr) {
 ; CHECK-NEXT:    vstr s7, [r5]
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
 entry:
-  %offs = load <8 x float*>, <8 x float*>* %offptr, align 4
-  call void @llvm.masked.scatter.v8f32.v8p0f32(<8 x float> %v, <8 x float*> %offs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> %v, <8 x ptr> %offs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; i16
 
 ; Expand.
-define arm_aapcs_vfpcc void @ptr_i16(<8 x i16> %v, <8 x i16*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_i16(<8 x i16> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -207,13 +207,13 @@ define arm_aapcs_vfpcc void @ptr_i16(<8 x i16> %v, <8 x i16*>* %offptr) {
 ; CHECK-NEXT:    strh r0, [r5]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %offs = load <8 x i16*>, <8 x i16*>* %offptr, align 4
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %v, <8 x i16*> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %v, <8 x ptr> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand
-define arm_aapcs_vfpcc void @ptr_v2i16_trunc(<2 x i32> %v, <2 x i16*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v2i16_trunc(<2 x i32> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v2i16_trunc:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov r2, s0
@@ -223,13 +223,13 @@ define arm_aapcs_vfpcc void @ptr_v2i16_trunc(<2 x i32> %v, <2 x i16*>* %offptr)
 ; CHECK-NEXT:    strh r1, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <2 x i16*>, <2 x i16*>* %offptr, align 4
+  %offs = load <2 x ptr>, ptr %offptr, align 4
   %ext = trunc <2 x i32> %v to <2 x i16>
-  call void @llvm.masked.scatter.v2i16.v2p0i16(<2 x i16> %ext, <2 x i16*> %offs, i32 2, <2 x i1> <i1 true, i1 true>)
+  call void @llvm.masked.scatter.v2i16.v2p0(<2 x i16> %ext, <2 x ptr> %offs, i32 2, <2 x i1> <i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @ptr_v4i16_trunc(<4 x i32> %v, <4 x i16*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v4i16_trunc(<4 x i32> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v4i16_trunc:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -237,13 +237,13 @@ define arm_aapcs_vfpcc void @ptr_v4i16_trunc(<4 x i32> %v, <4 x i16*>* %offptr)
 ; CHECK-NEXT:    vstrh.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i16*>, <4 x i16*>* %offptr, align 4
+  %offs = load <4 x ptr>, ptr %offptr, align 4
   %ext = trunc <4 x i32> %v to <4 x i16>
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %ext, <4 x i16*> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %ext, <4 x ptr> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @ptr_v4i16_dup(i32 %v, <4 x i16*> %offs) {
+define arm_aapcs_vfpcc void @ptr_v4i16_dup(i32 %v, <4 x ptr> %offs) {
 ; CHECK-LABEL: ptr_v4i16_dup:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vdup.32 q1, r0
@@ -255,12 +255,12 @@ entry:
   %ext = trunc i32 %v to i16
   %splatinsert = insertelement <4 x i16> poison, i16 %ext, i32 0
   %splat = shufflevector <4 x i16> %splatinsert, <4 x i16> poison, <4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %splat, <4 x i16*> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %splat, <4 x ptr> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand
-define arm_aapcs_vfpcc void @ptr_v8i16_trunc(<8 x i32> %v, <8 x i16*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v8i16_trunc(<8 x i32> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v8i16_trunc:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -285,16 +285,16 @@ define arm_aapcs_vfpcc void @ptr_v8i16_trunc(<8 x i32> %v, <8 x i16*>* %offptr)
 ; CHECK-NEXT:    strh r6, [r4]
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <8 x i16*>, <8 x i16*>* %offptr, align 4
+  %offs = load <8 x ptr>, ptr %offptr, align 4
   %ext = trunc <8 x i32> %v to <8 x i16>
-  call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %ext, <8 x i16*> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %ext, <8 x ptr> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; f16
 
 ; Expand.
-define arm_aapcs_vfpcc void @ptr_f16(<8 x half> %v, <8 x half*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_f16(<8 x half> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q2, [r0]
@@ -317,12 +317,12 @@ define arm_aapcs_vfpcc void @ptr_f16(<8 x half> %v, <8 x half*>* %offptr) {
 ; CHECK-NEXT:    vstr.16 s0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <8 x half*>, <8 x half*>* %offptr, align 4
-  call void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half> %v, <8 x half*> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <8 x ptr>, ptr %offptr, align 4
+  call void @llvm.masked.scatter.v8f16.v8p0(<8 x half> %v, <8 x ptr> %offs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @ptr_v4f16(<4 x half> %v, <4 x half*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v4f16(<4 x half> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v4f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -336,12 +336,12 @@ define arm_aapcs_vfpcc void @ptr_v4f16(<4 x half> %v, <4 x half*>* %offptr) {
 ; CHECK-NEXT:    vstr.16 s0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x half*>, <4 x half*>* %offptr, align 4
-  call void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half> %v, <4 x half*> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <4 x ptr>, ptr %offptr, align 4
+  call void @llvm.masked.scatter.v4f16.v4p0(<4 x half> %v, <4 x ptr> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @ptr_v4f16_dup(half %v, <4 x half*> %offs) {
+define arm_aapcs_vfpcc void @ptr_v4f16_dup(half %v, <4 x ptr> %offs) {
 ; CHECK-LABEL: ptr_v4f16_dup:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov r0, r1, d2
@@ -354,14 +354,14 @@ define arm_aapcs_vfpcc void @ptr_v4f16_dup(half %v, <4 x half*> %offs) {
 entry:
   %splatinsert = insertelement <4 x half> poison, half %v, i32 0
   %splat = shufflevector <4 x half> %splatinsert, <4 x half> poison, <4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half> %splat, <4 x half*> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4f16.v4p0(<4 x half> %splat, <4 x ptr> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; i8
 
 ; Expand.
-define arm_aapcs_vfpcc void @ptr_i8(<16 x i8> %v, <16 x i8*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_i8(<16 x i8> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
@@ -412,13 +412,13 @@ define arm_aapcs_vfpcc void @ptr_i8(<16 x i8> %v, <16 x i8*>* %offptr) {
 ; CHECK-NEXT:    strb r0, [r7]
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
 entry:
-  %offs = load <16 x i8*>, <16 x i8*>* %offptr, align 4
-  call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %v, <16 x i8*> %offs, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  %offs = load <16 x ptr>, ptr %offptr, align 4
+  call void @llvm.masked.scatter.v16i8.v16p0(<16 x i8> %v, <16 x ptr> %offs, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand
-define arm_aapcs_vfpcc void @ptr_v8i8_trunc16(<8 x i16> %v, <8 x i8*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v8i8_trunc16(<8 x i16> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v8i8_trunc16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -447,13 +447,13 @@ define arm_aapcs_vfpcc void @ptr_v8i8_trunc16(<8 x i16> %v, <8 x i8*>* %offptr)
 ; CHECK-NEXT:    strb r0, [r5]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %offs = load <8 x i8*>, <8 x i8*>* %offptr, align 4
+  %offs = load <8 x ptr>, ptr %offptr, align 4
   %ext = trunc <8 x i16> %v to <8 x i8>
-  call void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8> %ext, <8 x i8*> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8i8.v8p0(<8 x i8> %ext, <8 x ptr> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-define arm_aapcs_vfpcc void @ptr_v4i8_trunc32(<4 x i32> %v, <4 x i8*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v4i8_trunc32(<4 x i32> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v4i8_trunc32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -461,14 +461,14 @@ define arm_aapcs_vfpcc void @ptr_v4i8_trunc32(<4 x i32> %v, <4 x i8*>* %offptr)
 ; CHECK-NEXT:    vstrb.32 q0, [r0, q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %offs = load <4 x i8*>, <4 x i8*>* %offptr, align 4
+  %offs = load <4 x ptr>, ptr %offptr, align 4
   %ext = trunc <4 x i32> %v to <4 x i8>
-  call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %ext, <4 x i8*> %offs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %ext, <4 x ptr> %offs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; Expand
-define arm_aapcs_vfpcc void @ptr_v8i8_trunc32(<8 x i32> %v, <8 x i8*>* %offptr) {
+define arm_aapcs_vfpcc void @ptr_v8i8_trunc32(<8 x i32> %v, ptr %offptr) {
 ; CHECK-LABEL: ptr_v8i8_trunc32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -493,15 +493,15 @@ define arm_aapcs_vfpcc void @ptr_v8i8_trunc32(<8 x i32> %v, <8 x i8*>* %offptr)
 ; CHECK-NEXT:    strb r6, [r4]
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %offs = load <8 x i8*>, <8 x i8*>* %offptr, align 4
+  %offs = load <8 x ptr>, ptr %offptr, align 4
   %ext = trunc <8 x i32> %v to <8 x i8>
-  call void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8> %ext, <8 x i8*> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  call void @llvm.masked.scatter.v8i8.v8p0(<8 x i8> %ext, <8 x ptr> %offs, i32 1, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
 ; loops
 
-define void @foo_ptr_p_int32_t(i32* %dest, i32** %src, i32 %n) {
+define void @foo_ptr_p_int32_t(ptr %dest, ptr %src, i32 %n) {
 ; CHECK-LABEL: foo_ptr_p_int32_t:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    bic r3, r2, #15
@@ -525,23 +525,21 @@ entry:
 
 vector.body:                                      ; preds = %entry, %vector.body
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32*, i32** %src, i32 %index
-  %1 = bitcast i32** %0 to <4 x i32*>*
-  %wide.load = load <4 x i32*>, <4 x i32*>* %1, align 4
-  %2 = icmp ne <4 x i32*> %wide.load, zeroinitializer
-  %3 = getelementptr inbounds i32, i32* %dest, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.v4p0i32(<4 x i32>* %4, i32 4, <4 x i1> %2, <4 x i32> undef)
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %wide.masked.load, <4 x i32*> %wide.load, i32 4, <4 x i1> %2)
+  %0 = getelementptr inbounds ptr, ptr %src, i32 %index
+  %wide.load = load <4 x ptr>, ptr %0, align 4
+  %1 = icmp ne <4 x ptr> %wide.load, zeroinitializer
+  %2 = getelementptr inbounds i32, ptr %dest, i32 %index
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.v4p0(ptr %2, i32 4, <4 x i1> %1, <4 x i32> undef)
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %wide.masked.load, <4 x ptr> %wide.load, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %5 = icmp eq i32 %index.next, %n
-  br i1 %5, label %for.end, label %vector.body
+  %3 = icmp eq i32 %index.next, %n
+  br i1 %3, label %for.end, label %vector.body
 
 for.end:                                          ; preds = %vector.body, %entry
   ret void
 }
 
-define void @foo_ptr_p_float(float* %dest, float** %src, i32 %n) {
+define void @foo_ptr_p_float(ptr %dest, ptr %src, i32 %n) {
 ; CHECK-LABEL: foo_ptr_p_float:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    bic r3, r2, #15
@@ -565,25 +563,23 @@ entry:
 
 vector.body:                                      ; preds = %entry, %vector.body
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds float*, float** %src, i32 %index
-  %1 = bitcast float** %0 to <4 x float*>*
-  %wide.load = load <4 x float*>, <4 x float*>* %1, align 4
-  %2 = icmp ne <4 x float*> %wide.load, zeroinitializer
-  %3 = getelementptr inbounds float, float* %dest, i32 %index
-  %4 = bitcast float* %3 to <4 x i32>*
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.v4p0i32(<4 x i32>* %4, i32 4, <4 x i1> %2, <4 x i32> undef)
-  %5 = bitcast <4 x float*> %wide.load to <4 x i32*>
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %wide.masked.load, <4 x i32*> %5, i32 4, <4 x i1> %2)
+  %0 = getelementptr inbounds ptr, ptr %src, i32 %index
+  %wide.load = load <4 x ptr>, ptr %0, align 4
+  %1 = icmp ne <4 x ptr> %wide.load, zeroinitializer
+  %2 = getelementptr inbounds float, ptr %dest, i32 %index
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.v4p0(ptr %2, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %3 = bitcast <4 x ptr> %wide.load to <4 x ptr>
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %wide.masked.load, <4 x ptr> %3, i32 4, <4 x i1> %1)
   %index.next = add i32 %index, 4
-  %6 = icmp eq i32 %index.next, %n
-  br i1 %6, label %for.end, label %vector.body
+  %4 = icmp eq i32 %index.next, %n
+  br i1 %4, label %for.end, label %vector.body
 
 for.end:                                          ; preds = %vector.body, %entry
   ret void
 }
 
 ; VLSTW.u32 Qd, [P, 4]
-define arm_aapcs_vfpcc void @qi4(<4 x i32> %v, <4 x i32*> %p) {
+define arm_aapcs_vfpcc void @qi4(<4 x i32> %v, <4 x ptr> %p) {
 ; CHECK-LABEL: qi4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    movs r0, #16
@@ -591,24 +587,24 @@ define arm_aapcs_vfpcc void @qi4(<4 x i32> %v, <4 x i32*> %p) {
 ; CHECK-NEXT:    vstrw.32 q0, [q1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %g = getelementptr inbounds i32, <4 x i32*> %p, i32 4
-  call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %v, <4 x i32*> %g, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %g = getelementptr inbounds i32, <4 x ptr> %p, i32 4
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %v, <4 x ptr> %g, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
   ret void
 }
 
-declare void @llvm.masked.scatter.v2i16.v2p0i16(<2 x i16>, <2 x i16*>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v2i32.v2p0i32(<2 x i32>, <2 x i32*>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v2f32.v2p0f32(<2 x float>, <2 x float*>, i32, <2 x i1>)
-declare void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8>, <4 x i8*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16>, <4 x i16*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half>, <4 x half*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float>, <4 x float*>, i32, <4 x i1>)
-declare void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8>, <8 x i8*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16>, <8 x i16*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half>, <8 x half*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32>, <8 x i32*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v8f32.v8p0f32(<8 x float>, <8 x float*>, i32, <8 x i1>)
-declare void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8>, <16 x i8*>, i32, <16 x i1>)
-declare void @llvm.masked.scatter.v16i32.v16p0i32(<16 x i32>, <16 x i32*>, i32, <16 x i1>)
-declare <4 x i32> @llvm.masked.load.v4i32.v4p0i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.scatter.v2i16.v2p0(<2 x i16>, <2 x ptr>, i32, <2 x i1>)
+declare void @llvm.masked.scatter.v2i32.v2p0(<2 x i32>, <2 x ptr>, i32, <2 x i1>)
+declare void @llvm.masked.scatter.v2f32.v2p0(<2 x float>, <2 x ptr>, i32, <2 x i1>)
+declare void @llvm.masked.scatter.v4i8.v4p0(<4 x i8>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4f16.v4p0(<4 x half>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4f32.v4p0(<4 x float>, <4 x ptr>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v8i8.v8p0(<8 x i8>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v8i16.v8p0(<8 x i16>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v8f16.v8p0(<8 x half>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v8f32.v8p0(<8 x float>, <8 x ptr>, i32, <8 x i1>)
+declare void @llvm.masked.scatter.v16i8.v16p0(<16 x i8>, <16 x ptr>, i32, <16 x i1>)
+declare void @llvm.masked.scatter.v16i32.v16p0(<16 x i32>, <16 x ptr>, i32, <16 x i1>)
+declare <4 x i32> @llvm.masked.load.v4i32.v4p0(ptr, i32, <4 x i1>, <4 x i32>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-sext-masked-load.ll b/llvm/test/CodeGen/Thumb2/mve-sext-masked-load.ll
index 9c283fb6298ed..05f438acc3a7e 100644
--- a/llvm/test/CodeGen/Thumb2/mve-sext-masked-load.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-sext-masked-load.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs -o - %s | FileCheck %s
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs -early-live-intervals -o - %s | FileCheck %s
 
-define arm_aapcs_vfpcc <4 x float> @foo_v4i16(<4 x i16>* nocapture readonly %pSrc, i32 %blockSize, <4 x i16> %a) {
+define arm_aapcs_vfpcc <4 x float> @foo_v4i16(ptr nocapture readonly %pSrc, i32 %blockSize, <4 x i16> %a) {
 ; CHECK-LABEL: foo_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlb.s16 q0, q0
@@ -12,12 +12,12 @@ define arm_aapcs_vfpcc <4 x float> @foo_v4i16(<4 x i16>* nocapture readonly %pSr
 ; CHECK-NEXT:    bx lr
 entry:
   %active.lane.mask = icmp slt <4 x i16> %a, zeroinitializer
-  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %pSrc, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %pSrc, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
   %0 = sitofp <4 x i16> %wide.masked.load to <4 x float>
   ret <4 x float> %0
 }
 
-define arm_aapcs_vfpcc <8 x half> @foo_v8i8(<8 x i8>* nocapture readonly %pSrc, i32 %blockSize, <8 x i8> %a) {
+define arm_aapcs_vfpcc <8 x half> @foo_v8i8(ptr nocapture readonly %pSrc, i32 %blockSize, <8 x i8> %a) {
 ; CHECK-LABEL: foo_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlb.s8 q0, q0
@@ -27,12 +27,12 @@ define arm_aapcs_vfpcc <8 x half> @foo_v8i8(<8 x i8>* nocapture readonly %pSrc,
 ; CHECK-NEXT:    bx lr
 entry:
   %active.lane.mask = icmp slt <8 x i8> %a, zeroinitializer
-  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %pSrc, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
+  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %pSrc, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
   %0 = sitofp <8 x i8> %wide.masked.load to <8 x half>
   ret <8 x half> %0
 }
 
-define arm_aapcs_vfpcc <4 x float> @foo_v4i8(<4 x i8>* nocapture readonly %pSrc, i32 %blockSize, <4 x i8> %a) {
+define arm_aapcs_vfpcc <4 x float> @foo_v4i8(ptr nocapture readonly %pSrc, i32 %blockSize, <4 x i8> %a) {
 ; CHECK-LABEL: foo_v4i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlb.s8 q0, q0
@@ -43,12 +43,12 @@ define arm_aapcs_vfpcc <4 x float> @foo_v4i8(<4 x i8>* nocapture readonly %pSrc,
 ; CHECK-NEXT:    bx lr
 entry:
   %active.lane.mask = icmp slt <4 x i8> %a, zeroinitializer
-  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %pSrc, i32 1, <4 x i1> %active.lane.mask, <4 x i8> undef)
+  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %pSrc, i32 1, <4 x i1> %active.lane.mask, <4 x i8> undef)
   %0 = sitofp <4 x i8> %wide.masked.load to <4 x float>
   ret <4 x float> %0
 }
 
-define arm_aapcs_vfpcc <4 x double> @foo_v4i32(<4 x i32>* nocapture readonly %pSrc, i32 %blockSize, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x double> @foo_v4i32(ptr nocapture readonly %pSrc, i32 %blockSize, <4 x i32> %a) {
 ; CHECK-LABEL: foo_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -88,15 +88,15 @@ define arm_aapcs_vfpcc <4 x double> @foo_v4i32(<4 x i32>* nocapture readonly %pS
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   %active.lane.mask = icmp slt <4 x i32> %a, zeroinitializer
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %pSrc, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %pSrc, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %0 = sitofp <4 x i32> %wide.masked.load to <4 x double>
   ret <4 x double> %0
 }
 
-declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32 immarg, <4 x i1>, <4 x i16>)
 
-declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
+declare <8 x i8> @llvm.masked.load.v8i8.p0(ptr, i32 immarg, <8 x i1>, <8 x i8>)
 
-declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>)
+declare <4 x i8> @llvm.masked.load.v4i8.p0(ptr, i32 immarg, <4 x i1>, <4 x i8>)
 
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-shifts-scalar.ll b/llvm/test/CodeGen/Thumb2/mve-shifts-scalar.ll
index d46851b39826a..dc4da224bd449 100644
--- a/llvm/test/CodeGen/Thumb2/mve-shifts-scalar.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-shifts-scalar.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -O3 -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve %s --verify-machineinstrs -o - | FileCheck %s
 
-define dso_local arm_aapcs_vfpcc void @sink_shl_i32(i32* nocapture readonly %in, i32* noalias nocapture %out, i32 %shift, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @sink_shl_i32(ptr nocapture readonly %in, ptr noalias nocapture %out, i32 %shift, i32 %N) {
 ; CHECK-LABEL: sink_shl_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -29,13 +29,11 @@ vector.ph:
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %gep.in = getelementptr inbounds i32, i32* %in, i32 %index
-  %cast.in = bitcast i32* %gep.in to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %cast.in, align 4
+  %gep.in = getelementptr inbounds i32, ptr %in, i32 %index
+  %wide.load = load <4 x i32>, ptr %gep.in, align 4
   %res = shl <4 x i32> %wide.load, %broadcast.splat11
-  %gep.out = getelementptr inbounds i32, i32* %out, i32 %index
-  %cast.out = bitcast i32* %gep.out to <4 x i32>*
-  store <4 x i32> %res, <4 x i32>* %cast.out, align 4
+  %gep.out = getelementptr inbounds i32, ptr %out, i32 %index
+  store <4 x i32> %res, ptr %gep.out, align 4
   %index.next = add i32 %index, 4
   %cmp = icmp eq i32 %index.next, %n.vec
   br i1 %cmp, label %exit, label %vector.body
@@ -44,7 +42,7 @@ exit:
   ret void
 }
 
-define dso_local arm_aapcs_vfpcc void @sink_shl_i16(i16* nocapture readonly %in, i16* noalias nocapture %out, i16 %shift, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @sink_shl_i16(ptr nocapture readonly %in, ptr noalias nocapture %out, i16 %shift, i32 %N) {
 ; CHECK-LABEL: sink_shl_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -72,13 +70,11 @@ vector.ph:
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %gep.in = getelementptr inbounds i16, i16* %in, i32 %index
-  %cast.in = bitcast i16* %gep.in to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %cast.in, align 4
+  %gep.in = getelementptr inbounds i16, ptr %in, i32 %index
+  %wide.load = load <8 x i16>, ptr %gep.in, align 4
   %res = shl <8 x i16> %wide.load, %broadcast.splat11
-  %gep.out = getelementptr inbounds i16, i16* %out, i32 %index
-  %cast.out = bitcast i16* %gep.out to <8 x i16>*
-  store <8 x i16> %res, <8 x i16>* %cast.out, align 4
+  %gep.out = getelementptr inbounds i16, ptr %out, i32 %index
+  store <8 x i16> %res, ptr %gep.out, align 4
   %index.next = add i32 %index, 4
   %cmp = icmp eq i32 %index.next, %n.vec
   br i1 %cmp, label %exit, label %vector.body
@@ -87,7 +83,7 @@ exit:
   ret void
 }
 
-define dso_local arm_aapcs_vfpcc void @sink_shl_i8(i8* nocapture readonly %in, i8* noalias nocapture %out, i8 %shift, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @sink_shl_i8(ptr nocapture readonly %in, ptr noalias nocapture %out, i8 %shift, i32 %N) {
 ; CHECK-LABEL: sink_shl_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -115,13 +111,11 @@ vector.ph:
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %gep.in = getelementptr inbounds i8, i8* %in, i32 %index
-  %cast.in = bitcast i8* %gep.in to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %cast.in, align 4
+  %gep.in = getelementptr inbounds i8, ptr %in, i32 %index
+  %wide.load = load <16 x i8>, ptr %gep.in, align 4
   %res = shl <16 x i8> %wide.load, %broadcast.splat11
-  %gep.out = getelementptr inbounds i8, i8* %out, i32 %index
-  %cast.out = bitcast i8* %gep.out to <16 x i8>*
-  store <16 x i8> %res, <16 x i8>* %cast.out, align 4
+  %gep.out = getelementptr inbounds i8, ptr %out, i32 %index
+  store <16 x i8> %res, ptr %gep.out, align 4
   %index.next = add i32 %index, 4
   %cmp = icmp eq i32 %index.next, %n.vec
   br i1 %cmp, label %exit, label %vector.body
@@ -130,7 +124,7 @@ exit:
   ret void
 }
 
-define dso_local arm_aapcs_vfpcc void @sink_lshr_i32(i32* nocapture readonly %in, i32* noalias nocapture %out, i32 %shift, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @sink_lshr_i32(ptr nocapture readonly %in, ptr noalias nocapture %out, i32 %shift, i32 %N) {
 ; CHECK-LABEL: sink_lshr_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -159,13 +153,11 @@ vector.ph:
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %gep.in = getelementptr inbounds i32, i32* %in, i32 %index
-  %cast.in = bitcast i32* %gep.in to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %cast.in, align 4
+  %gep.in = getelementptr inbounds i32, ptr %in, i32 %index
+  %wide.load = load <4 x i32>, ptr %gep.in, align 4
   %res = lshr <4 x i32> %wide.load, %broadcast.splat11
-  %gep.out = getelementptr inbounds i32, i32* %out, i32 %index
-  %cast.out = bitcast i32* %gep.out to <4 x i32>*
-  store <4 x i32> %res, <4 x i32>* %cast.out, align 4
+  %gep.out = getelementptr inbounds i32, ptr %out, i32 %index
+  store <4 x i32> %res, ptr %gep.out, align 4
   %index.next = add i32 %index, 4
   %cmp = icmp eq i32 %index.next, %n.vec
   br i1 %cmp, label %exit, label %vector.body
@@ -174,7 +166,7 @@ exit:
   ret void
 }
 
-define dso_local arm_aapcs_vfpcc void @sink_lshr_i16(i16* nocapture readonly %in, i16* noalias nocapture %out, i16 %shift, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @sink_lshr_i16(ptr nocapture readonly %in, ptr noalias nocapture %out, i16 %shift, i32 %N) {
 ; CHECK-LABEL: sink_lshr_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -203,13 +195,11 @@ vector.ph:
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %gep.in = getelementptr inbounds i16, i16* %in, i32 %index
-  %cast.in = bitcast i16* %gep.in to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %cast.in, align 4
+  %gep.in = getelementptr inbounds i16, ptr %in, i32 %index
+  %wide.load = load <8 x i16>, ptr %gep.in, align 4
   %res = lshr <8 x i16> %wide.load, %broadcast.splat11
-  %gep.out = getelementptr inbounds i16, i16* %out, i32 %index
-  %cast.out = bitcast i16* %gep.out to <8 x i16>*
-  store <8 x i16> %res, <8 x i16>* %cast.out, align 4
+  %gep.out = getelementptr inbounds i16, ptr %out, i32 %index
+  store <8 x i16> %res, ptr %gep.out, align 4
   %index.next = add i32 %index, 4
   %cmp = icmp eq i32 %index.next, %n.vec
   br i1 %cmp, label %exit, label %vector.body
@@ -218,7 +208,7 @@ exit:
   ret void
 }
 
-define dso_local arm_aapcs_vfpcc void @sink_lshr_i8(i8* nocapture readonly %in, i8* noalias nocapture %out, i8 %shift, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @sink_lshr_i8(ptr nocapture readonly %in, ptr noalias nocapture %out, i8 %shift, i32 %N) {
 ; CHECK-LABEL: sink_lshr_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -247,13 +237,11 @@ vector.ph:
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %gep.in = getelementptr inbounds i8, i8* %in, i32 %index
-  %cast.in = bitcast i8* %gep.in to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %cast.in, align 4
+  %gep.in = getelementptr inbounds i8, ptr %in, i32 %index
+  %wide.load = load <16 x i8>, ptr %gep.in, align 4
   %res = lshr <16 x i8> %wide.load, %broadcast.splat11
-  %gep.out = getelementptr inbounds i8, i8* %out, i32 %index
-  %cast.out = bitcast i8* %gep.out to <16 x i8>*
-  store <16 x i8> %res, <16 x i8>* %cast.out, align 4
+  %gep.out = getelementptr inbounds i8, ptr %out, i32 %index
+  store <16 x i8> %res, ptr %gep.out, align 4
   %index.next = add i32 %index, 4
   %cmp = icmp eq i32 %index.next, %n.vec
   br i1 %cmp, label %exit, label %vector.body
@@ -262,7 +250,7 @@ exit:
   ret void
 }
 
-define dso_local arm_aapcs_vfpcc void @sink_ashr_i32(i32* nocapture readonly %in, i32* noalias nocapture %out, i32 %shift, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @sink_ashr_i32(ptr nocapture readonly %in, ptr noalias nocapture %out, i32 %shift, i32 %N) {
 ; CHECK-LABEL: sink_ashr_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -291,13 +279,11 @@ vector.ph:
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %gep.in = getelementptr inbounds i32, i32* %in, i32 %index
-  %cast.in = bitcast i32* %gep.in to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %cast.in, align 4
+  %gep.in = getelementptr inbounds i32, ptr %in, i32 %index
+  %wide.load = load <4 x i32>, ptr %gep.in, align 4
   %res = ashr <4 x i32> %wide.load, %broadcast.splat11
-  %gep.out = getelementptr inbounds i32, i32* %out, i32 %index
-  %cast.out = bitcast i32* %gep.out to <4 x i32>*
-  store <4 x i32> %res, <4 x i32>* %cast.out, align 4
+  %gep.out = getelementptr inbounds i32, ptr %out, i32 %index
+  store <4 x i32> %res, ptr %gep.out, align 4
   %index.next = add i32 %index, 4
   %cmp = icmp eq i32 %index.next, %n.vec
   br i1 %cmp, label %exit, label %vector.body
@@ -306,7 +292,7 @@ exit:
   ret void
 }
 
-define dso_local arm_aapcs_vfpcc void @sink_ashr_i16(i16* nocapture readonly %in, i16* noalias nocapture %out, i16 %shift, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @sink_ashr_i16(ptr nocapture readonly %in, ptr noalias nocapture %out, i16 %shift, i32 %N) {
 ; CHECK-LABEL: sink_ashr_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -335,13 +321,11 @@ vector.ph:
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %gep.in = getelementptr inbounds i16, i16* %in, i32 %index
-  %cast.in = bitcast i16* %gep.in to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %cast.in, align 4
+  %gep.in = getelementptr inbounds i16, ptr %in, i32 %index
+  %wide.load = load <8 x i16>, ptr %gep.in, align 4
   %res = ashr <8 x i16> %wide.load, %broadcast.splat11
-  %gep.out = getelementptr inbounds i16, i16* %out, i32 %index
-  %cast.out = bitcast i16* %gep.out to <8 x i16>*
-  store <8 x i16> %res, <8 x i16>* %cast.out, align 4
+  %gep.out = getelementptr inbounds i16, ptr %out, i32 %index
+  store <8 x i16> %res, ptr %gep.out, align 4
   %index.next = add i32 %index, 4
   %cmp = icmp eq i32 %index.next, %n.vec
   br i1 %cmp, label %exit, label %vector.body
@@ -350,7 +334,7 @@ exit:
   ret void
 }
 
-define dso_local arm_aapcs_vfpcc void @sink_ashr_i8(i8* nocapture readonly %in, i8* noalias nocapture %out, i8 %shift, i32 %N) {
+define dso_local arm_aapcs_vfpcc void @sink_ashr_i8(ptr nocapture readonly %in, ptr noalias nocapture %out, i8 %shift, i32 %N) {
 ; CHECK-LABEL: sink_ashr_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -379,13 +363,11 @@ vector.ph:
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %gep.in = getelementptr inbounds i8, i8* %in, i32 %index
-  %cast.in = bitcast i8* %gep.in to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %cast.in, align 4
+  %gep.in = getelementptr inbounds i8, ptr %in, i32 %index
+  %wide.load = load <16 x i8>, ptr %gep.in, align 4
   %res = ashr <16 x i8> %wide.load, %broadcast.splat11
-  %gep.out = getelementptr inbounds i8, i8* %out, i32 %index
-  %cast.out = bitcast i8* %gep.out to <16 x i8>*
-  store <16 x i8> %res, <16 x i8>* %cast.out, align 4
+  %gep.out = getelementptr inbounds i8, ptr %out, i32 %index
+  store <16 x i8> %res, ptr %gep.out, align 4
   %index.next = add i32 %index, 4
   %cmp = icmp eq i32 %index.next, %n.vec
   br i1 %cmp, label %exit, label %vector.body

diff --git a/llvm/test/CodeGen/Thumb2/mve-stack.ll b/llvm/test/CodeGen/Thumb2/mve-stack.ll
index ea272e19b23fc..68beebc5968bc 100644
--- a/llvm/test/CodeGen/Thumb2/mve-stack.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-stack.ll
@@ -16,11 +16,9 @@ define arm_aapcs_vfpcc void @vstrw32() {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   %d = alloca [4 x i32], align 2
-  %g = getelementptr inbounds [4 x i32], [4 x i32]* %d, i32 0, i32 2
-  %b = bitcast i32* %g to <4 x i32>*
-  store <4 x i32> zeroinitializer, <4 x i32>* %b, align 2
-  %arraydecay = getelementptr inbounds [4 x i32], [4 x i32]* %d, i32 0, i32 0
-  call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i32*)*)(i32* %arraydecay)
+  %g = getelementptr inbounds [4 x i32], ptr %d, i32 0, i32 2
+  store <4 x i32> zeroinitializer, ptr %g, align 2
+  call arm_aapcs_vfpcc void @func(ptr %d)
   ret void
 }
 
@@ -39,11 +37,9 @@ define arm_aapcs_vfpcc void @vstrh16() {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   %d = alloca [8 x i16], align 2
-  %g = getelementptr inbounds [8 x i16], [8 x i16]* %d, i32 0, i32 2
-  %b = bitcast i16* %g to <8 x i16>*
-  store <8 x i16> zeroinitializer, <8 x i16>* %b, align 2
-  %arraydecay = getelementptr inbounds [8 x i16], [8 x i16]* %d, i32 0, i32 0
-  call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i16*)*)(i16* %arraydecay)
+  %g = getelementptr inbounds [8 x i16], ptr %d, i32 0, i32 2
+  store <8 x i16> zeroinitializer, ptr %g, align 2
+  call arm_aapcs_vfpcc void @func(ptr %d)
   ret void
 }
 
@@ -62,11 +58,9 @@ define arm_aapcs_vfpcc void @vstrb8() {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   %d = alloca [16 x i8], align 2
-  %g = getelementptr inbounds [16 x i8], [16 x i8]* %d, i32 0, i32 2
-  %b = bitcast i8* %g to <16 x i8>*
-  store <16 x i8> zeroinitializer, <16 x i8>* %b, align 2
-  %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %d, i32 0, i32 0
-  call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i8*)*)(i8* %arraydecay)
+  %g = getelementptr inbounds [16 x i8], ptr %d, i32 0, i32 2
+  store <16 x i8> zeroinitializer, ptr %g, align 2
+  call arm_aapcs_vfpcc void @func(ptr %d)
   ret void
 }
 
@@ -85,11 +79,9 @@ define arm_aapcs_vfpcc void @vstrh32() {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   %d = alloca [4 x i16], align 2
-  %g = getelementptr inbounds [4 x i16], [4 x i16]* %d, i32 0, i32 2
-  %b = bitcast i16* %g to <4 x i16>*
-  store <4 x i16> <i16 6, i16 6, i16 6, i16 6>, <4 x i16>* %b, align 2
-  %arraydecay = getelementptr inbounds [4 x i16], [4 x i16]* %d, i32 0, i32 0
-  call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i16*)*)(i16* %arraydecay)
+  %g = getelementptr inbounds [4 x i16], ptr %d, i32 0, i32 2
+  store <4 x i16> <i16 6, i16 6, i16 6, i16 6>, ptr %g, align 2
+  call arm_aapcs_vfpcc void @func(ptr %d)
   ret void
 }
 
@@ -108,11 +100,9 @@ define arm_aapcs_vfpcc void @vstrb32() {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   %d = alloca [4 x i8], align 2
-  %g = getelementptr inbounds [4 x i8], [4 x i8]* %d, i32 0, i32 2
-  %b = bitcast i8* %g to <4 x i8>*
-  store <4 x i8> <i8 6, i8 6, i8 6, i8 6>, <4 x i8>* %b, align 2
-  %arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %d, i32 0, i32 0
-  call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i8*)*)(i8* %arraydecay)
+  %g = getelementptr inbounds [4 x i8], ptr %d, i32 0, i32 2
+  store <4 x i8> <i8 6, i8 6, i8 6, i8 6>, ptr %g, align 2
+  call arm_aapcs_vfpcc void @func(ptr %d)
   ret void
 }
 
@@ -131,11 +121,9 @@ define arm_aapcs_vfpcc void @vstrb16() {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   %d = alloca [8 x i8], align 2
-  %g = getelementptr inbounds [8 x i8], [8 x i8]* %d, i32 0, i32 2
-  %b = bitcast i8* %g to <8 x i8>*
-  store <8 x i8> zeroinitializer, <8 x i8>* %b, align 2
-  %arraydecay = getelementptr inbounds [8 x i8], [8 x i8]* %d, i32 0, i32 0
-  call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i8*)*)(i8* %arraydecay)
+  %g = getelementptr inbounds [8 x i8], ptr %d, i32 0, i32 2
+  store <8 x i8> zeroinitializer, ptr %g, align 2
+  call arm_aapcs_vfpcc void @func(ptr %d)
   ret void
 }
 
@@ -154,11 +142,9 @@ define arm_aapcs_vfpcc <4 x i32> @vldrw32() {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   %d = alloca [4 x i32], align 2
-  %arraydecay = getelementptr inbounds [4 x i32], [4 x i32]* %d, i32 0, i32 0
-  call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i32*)*)(i32* %arraydecay)
-  %g = getelementptr inbounds [4 x i32], [4 x i32]* %d, i32 0, i32 2
-  %b = bitcast i32* %g to <4 x i32>*
-  %l = load <4 x i32>, <4 x i32>* %b, align 2
+  call arm_aapcs_vfpcc void @func(ptr %d)
+  %g = getelementptr inbounds [4 x i32], ptr %d, i32 0, i32 2
+  %l = load <4 x i32>, ptr %g, align 2
   ret <4 x i32> %l
 }
 
@@ -176,11 +162,9 @@ define arm_aapcs_vfpcc <8 x i16> @vldrh16() {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   %d = alloca [8 x i16], align 2
-  %arraydecay = getelementptr inbounds [8 x i16], [8 x i16]* %d, i32 0, i32 0
-  call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i16*)*)(i16* %arraydecay)
-  %g = getelementptr inbounds [8 x i16], [8 x i16]* %d, i32 0, i32 2
-  %b = bitcast i16* %g to <8 x i16>*
-  %l = load <8 x i16>, <8 x i16>* %b, align 2
+  call arm_aapcs_vfpcc void @func(ptr %d)
+  %g = getelementptr inbounds [8 x i16], ptr %d, i32 0, i32 2
+  %l = load <8 x i16>, ptr %g, align 2
   ret <8 x i16> %l
 }
 
@@ -198,11 +182,9 @@ define arm_aapcs_vfpcc <16 x i8> @vldrb8() {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   %d = alloca [16 x i8], align 2
-  %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %d, i32 0, i32 0
-  call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i8*)*)(i8* %arraydecay)
-  %g = getelementptr inbounds [16 x i8], [16 x i8]* %d, i32 0, i32 2
-  %b = bitcast i8* %g to <16 x i8>*
-  %l = load <16 x i8>, <16 x i8>* %b, align 2
+  call arm_aapcs_vfpcc void @func(ptr %d)
+  %g = getelementptr inbounds [16 x i8], ptr %d, i32 0, i32 2
+  %l = load <16 x i8>, ptr %g, align 2
   ret <16 x i8> %l
 }
 
@@ -221,11 +203,9 @@ define arm_aapcs_vfpcc <4 x i16> @vldrh32() {
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
   %d = alloca [4 x i16], align 2
-  %arraydecay = getelementptr inbounds [4 x i16], [4 x i16]* %d, i32 0, i32 0
-  call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i16*)*)(i16* %arraydecay)
-  %g = getelementptr inbounds [4 x i16], [4 x i16]* %d, i32 0, i32 2
-  %b = bitcast i16* %g to <4 x i16>*
-  %l = load <4 x i16>, <4 x i16>* %b, align 2
+  call arm_aapcs_vfpcc void @func(ptr %d)
+  %g = getelementptr inbounds [4 x i16], ptr %d, i32 0, i32 2
+  %l = load <4 x i16>, ptr %g, align 2
   ret <4 x i16> %l
 }
 
@@ -244,11 +224,9 @@ define arm_aapcs_vfpcc <4 x i8> @vldrb32() {
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
   %d = alloca [4 x i8], align 2
-  %arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %d, i32 0, i32 0
-  call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i8*)*)(i8* %arraydecay)
-  %g = getelementptr inbounds [4 x i8], [4 x i8]* %d, i32 0, i32 2
-  %b = bitcast i8* %g to <4 x i8>*
-  %l = load <4 x i8>, <4 x i8>* %b, align 2
+  call arm_aapcs_vfpcc void @func(ptr %d)
+  %g = getelementptr inbounds [4 x i8], ptr %d, i32 0, i32 2
+  %l = load <4 x i8>, ptr %g, align 2
   ret <4 x i8> %l
 }
 
@@ -267,11 +245,9 @@ define arm_aapcs_vfpcc <8 x i8> @vldrb16() {
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
   %d = alloca [8 x i8], align 2
-  %arraydecay = getelementptr inbounds [8 x i8], [8 x i8]* %d, i32 0, i32 0
-  call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i8*)*)(i8* %arraydecay)
-  %g = getelementptr inbounds [8 x i8], [8 x i8]* %d, i32 0, i32 2
-  %b = bitcast i8* %g to <8 x i8>*
-  %l = load <8 x i8>, <8 x i8>* %b, align 2
+  call arm_aapcs_vfpcc void @func(ptr %d)
+  %g = getelementptr inbounds [8 x i8], ptr %d, i32 0, i32 2
+  %l = load <8 x i8>, ptr %g, align 2
   ret <8 x i8> %l
 }
 

diff --git a/llvm/test/CodeGen/Thumb2/mve-tailpred-loopinvariant.ll b/llvm/test/CodeGen/Thumb2/mve-tailpred-loopinvariant.ll
index 359591219ce63..750947dc26186 100644
--- a/llvm/test/CodeGen/Thumb2/mve-tailpred-loopinvariant.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-tailpred-loopinvariant.ll
@@ -5,7 +5,7 @@
 ; active.lane.mask operand. (%exitcount.ptrcnt.to.int = ptrtoint). We
 ; need to make sure it is loop invariant.
 
-define i32 @a(i32* readnone %b, i8* %c) {
+define i32 @a(ptr readnone %b, ptr %c) {
 ; CHECK-LABEL: a:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -28,80 +28,78 @@ define i32 @a(i32* readnone %b, i8* %c) {
 ; CHECK-NEXT:  @ %bb.3: @ %while.end
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
-  %0 = bitcast i32* %b to i8*
-  %cmp3 = icmp ugt i8* %0, %c
+  %cmp3 = icmp ugt ptr %b, %c
   br i1 %cmp3, label %while.body.preheader, label %while.end
 
 while.body.preheader:                             ; preds = %entry
-  %c5 = ptrtoint i8* %c to i32
-  %1 = sub i32 0, %c5
-  %uglygep = getelementptr i8, i8* %0, i32 %1
-  %exitcount.ptrcnt.to.int = ptrtoint i8* %uglygep to i32
+  %c5 = ptrtoint ptr %c to i32
+  %0 = sub i32 0, %c5
+  %uglygep = getelementptr i8, ptr %b, i32 %0
+  %exitcount.ptrcnt.to.int = ptrtoint ptr %uglygep to i32
   %n.rnd.up = add i32 %exitcount.ptrcnt.to.int, 15
   %n.vec = and i32 %n.rnd.up, -16
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %while.body.preheader
   %index = phi i32 [ 0, %while.body.preheader ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr i8, i8* %c, i32 %index
-  %2 = or i32 %index, 1
-  %next.gep7 = getelementptr i8, i8* %c, i32 %2
-  %3 = or i32 %index, 2
-  %next.gep8 = getelementptr i8, i8* %c, i32 %3
-  %4 = or i32 %index, 3
-  %next.gep9 = getelementptr i8, i8* %c, i32 %4
-  %5 = or i32 %index, 4
-  %next.gep10 = getelementptr i8, i8* %c, i32 %5
-  %6 = or i32 %index, 5
-  %next.gep11 = getelementptr i8, i8* %c, i32 %6
-  %7 = or i32 %index, 6
-  %next.gep12 = getelementptr i8, i8* %c, i32 %7
-  %8 = or i32 %index, 7
-  %next.gep13 = getelementptr i8, i8* %c, i32 %8
-  %9 = or i32 %index, 8
-  %next.gep14 = getelementptr i8, i8* %c, i32 %9
-  %10 = or i32 %index, 9
-  %next.gep15 = getelementptr i8, i8* %c, i32 %10
-  %11 = or i32 %index, 10
-  %next.gep16 = getelementptr i8, i8* %c, i32 %11
-  %12 = or i32 %index, 11
-  %next.gep17 = getelementptr i8, i8* %c, i32 %12
-  %13 = or i32 %index, 12
-  %next.gep18 = getelementptr i8, i8* %c, i32 %13
-  %14 = or i32 %index, 13
-  %next.gep19 = getelementptr i8, i8* %c, i32 %14
-  %15 = or i32 %index, 14
-  %next.gep20 = getelementptr i8, i8* %c, i32 %15
-  %16 = or i32 %index, 15
-  %next.gep21 = getelementptr i8, i8* %c, i32 %16
-  %17 = insertelement <16 x i8*> poison, i8* %next.gep, i32 0
-  %18 = insertelement <16 x i8*> %17, i8* %next.gep7, i32 1
-  %19 = insertelement <16 x i8*> %18, i8* %next.gep8, i32 2
-  %20 = insertelement <16 x i8*> %19, i8* %next.gep9, i32 3
-  %21 = insertelement <16 x i8*> %20, i8* %next.gep10, i32 4
-  %22 = insertelement <16 x i8*> %21, i8* %next.gep11, i32 5
-  %23 = insertelement <16 x i8*> %22, i8* %next.gep12, i32 6
-  %24 = insertelement <16 x i8*> %23, i8* %next.gep13, i32 7
-  %25 = insertelement <16 x i8*> %24, i8* %next.gep14, i32 8
-  %26 = insertelement <16 x i8*> %25, i8* %next.gep15, i32 9
-  %27 = insertelement <16 x i8*> %26, i8* %next.gep16, i32 10
-  %28 = insertelement <16 x i8*> %27, i8* %next.gep17, i32 11
-  %29 = insertelement <16 x i8*> %28, i8* %next.gep18, i32 12
-  %30 = insertelement <16 x i8*> %29, i8* %next.gep19, i32 13
-  %31 = insertelement <16 x i8*> %30, i8* %next.gep20, i32 14
-  %32 = insertelement <16 x i8*> %31, i8* %next.gep21, i32 15
+  %next.gep = getelementptr i8, ptr %c, i32 %index
+  %1 = or i32 %index, 1
+  %next.gep7 = getelementptr i8, ptr %c, i32 %1
+  %2 = or i32 %index, 2
+  %next.gep8 = getelementptr i8, ptr %c, i32 %2
+  %3 = or i32 %index, 3
+  %next.gep9 = getelementptr i8, ptr %c, i32 %3
+  %4 = or i32 %index, 4
+  %next.gep10 = getelementptr i8, ptr %c, i32 %4
+  %5 = or i32 %index, 5
+  %next.gep11 = getelementptr i8, ptr %c, i32 %5
+  %6 = or i32 %index, 6
+  %next.gep12 = getelementptr i8, ptr %c, i32 %6
+  %7 = or i32 %index, 7
+  %next.gep13 = getelementptr i8, ptr %c, i32 %7
+  %8 = or i32 %index, 8
+  %next.gep14 = getelementptr i8, ptr %c, i32 %8
+  %9 = or i32 %index, 9
+  %next.gep15 = getelementptr i8, ptr %c, i32 %9
+  %10 = or i32 %index, 10
+  %next.gep16 = getelementptr i8, ptr %c, i32 %10
+  %11 = or i32 %index, 11
+  %next.gep17 = getelementptr i8, ptr %c, i32 %11
+  %12 = or i32 %index, 12
+  %next.gep18 = getelementptr i8, ptr %c, i32 %12
+  %13 = or i32 %index, 13
+  %next.gep19 = getelementptr i8, ptr %c, i32 %13
+  %14 = or i32 %index, 14
+  %next.gep20 = getelementptr i8, ptr %c, i32 %14
+  %15 = or i32 %index, 15
+  %next.gep21 = getelementptr i8, ptr %c, i32 %15
+  %16 = insertelement <16 x ptr> poison, ptr %next.gep, i32 0
+  %17 = insertelement <16 x ptr> %16, ptr %next.gep7, i32 1
+  %18 = insertelement <16 x ptr> %17, ptr %next.gep8, i32 2
+  %19 = insertelement <16 x ptr> %18, ptr %next.gep9, i32 3
+  %20 = insertelement <16 x ptr> %19, ptr %next.gep10, i32 4
+  %21 = insertelement <16 x ptr> %20, ptr %next.gep11, i32 5
+  %22 = insertelement <16 x ptr> %21, ptr %next.gep12, i32 6
+  %23 = insertelement <16 x ptr> %22, ptr %next.gep13, i32 7
+  %24 = insertelement <16 x ptr> %23, ptr %next.gep14, i32 8
+  %25 = insertelement <16 x ptr> %24, ptr %next.gep15, i32 9
+  %26 = insertelement <16 x ptr> %25, ptr %next.gep16, i32 10
+  %27 = insertelement <16 x ptr> %26, ptr %next.gep17, i32 11
+  %28 = insertelement <16 x ptr> %27, ptr %next.gep18, i32 12
+  %29 = insertelement <16 x ptr> %28, ptr %next.gep19, i32 13
+  %30 = insertelement <16 x ptr> %29, ptr %next.gep20, i32 14
+  %31 = insertelement <16 x ptr> %30, ptr %next.gep21, i32 15
   %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %exitcount.ptrcnt.to.int)
-  %33 = ptrtoint <16 x i8*> %32 to <16 x i32>
-  %34 = trunc <16 x i32> %33 to <16 x i8>
-  %35 = bitcast i8* %next.gep to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %34, <16 x i8>* %35, i32 1, <16 x i1> %active.lane.mask)
+  %32 = ptrtoint <16 x ptr> %31 to <16 x i32>
+  %33 = trunc <16 x i32> %32 to <16 x i8>
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %33, ptr %next.gep, i32 1, <16 x i1> %active.lane.mask)
   %index.next = add i32 %index, 16
-  %36 = icmp eq i32 %index.next, %n.vec
-  br i1 %36, label %while.end, label %vector.body
+  %34 = icmp eq i32 %index.next, %n.vec
+  br i1 %34, label %while.end, label %vector.body
 
 while.end:                                        ; preds = %vector.body, %entry
   ret i32 undef
 }
 
 declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32 immarg, <16 x i1>)
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32 immarg, <16 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-vabd.ll b/llvm/test/CodeGen/Thumb2/mve-vabd.ll
index e18ca2d82118b..f209a76d82e80 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vabd.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vabd.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve %s -o - | FileCheck %s --check-prefix=CHECK-MVE
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s --check-prefix=CHECK-MVEFP
 
-define arm_aapcs_vfpcc void @vabd_v4f32(<4 x float> %x, <4 x float> %y, <4 x float>* %z) {
+define arm_aapcs_vfpcc void @vabd_v4f32(<4 x float> %x, <4 x float> %y, ptr %z) {
 ; CHECK-MVE-LABEL: vabd_v4f32:
 ; CHECK-MVE:       @ %bb.0: @ %entry
 ; CHECK-MVE-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
@@ -51,11 +51,11 @@ define arm_aapcs_vfpcc void @vabd_v4f32(<4 x float> %x, <4 x float> %y, <4 x flo
 entry:
   %0 = fsub <4 x float> %x, %y
   %1 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %0)
-  store <4 x float> %1, <4 x float>* %z, align 4
+  store <4 x float> %1, ptr %z, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %z) {
+define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
 ; CHECK-MVE-LABEL: vabd_v8f16:
 ; CHECK-MVE:       @ %bb.0: @ %entry
 ; CHECK-MVE-NEXT:    .save {r4, r5, r6, lr}
@@ -159,7 +159,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>
 entry:
   %0 = fsub <8 x half> %x, %y
   %1 = call <8 x half> @llvm.fabs.v8f16(<8 x half> %0)
-  store <8 x half> %1, <8 x half>* %z
+  store <8 x half> %1, ptr %z
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Thumb2/mve-vabdus.ll b/llvm/test/CodeGen/Thumb2/mve-vabdus.ll
index 9819a8253f345..b832d52711e97 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vabdus.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vabdus.ll
@@ -284,7 +284,7 @@ define arm_aapcs_vfpcc <2 x i32> @vabd_v2u32(<2 x i32> %src1, <2 x i32> %src2) {
   ret <2 x i32> %result
 }
 
-define void @vabd_loop_s8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8* noalias nocapture %z, i32 %n) {
+define void @vabd_loop_s8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vabd_loop_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -304,31 +304,28 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %x, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = sext <16 x i8> %wide.load to <16 x i32>
-  %3 = getelementptr inbounds i8, i8* %y, i32 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load22 = load <16 x i8>, <16 x i8>* %4, align 1
-  %5 = sext <16 x i8> %wide.load22 to <16 x i32>
-  %6 = sub nsw <16 x i32> %2, %5
-  %7 = icmp slt <16 x i32> %6, zeroinitializer
-  %8 = sub nsw <16 x i32> zeroinitializer, %6
-  %9 = select <16 x i1> %7, <16 x i32> %8, <16 x i32> %6
-  %10 = trunc <16 x i32> %9 to <16 x i8>
-  %11 = getelementptr inbounds i8, i8* %z, i32 %index
-  %12 = bitcast i8* %11 to <16 x i8>*
-  store <16 x i8> %10, <16 x i8>* %12, align 1
+  %0 = getelementptr inbounds i8, ptr %x, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 1
+  %1 = sext <16 x i8> %wide.load to <16 x i32>
+  %2 = getelementptr inbounds i8, ptr %y, i32 %index
+  %wide.load22 = load <16 x i8>, ptr %2, align 1
+  %3 = sext <16 x i8> %wide.load22 to <16 x i32>
+  %4 = sub nsw <16 x i32> %1, %3
+  %5 = icmp slt <16 x i32> %4, zeroinitializer
+  %6 = sub nsw <16 x i32> zeroinitializer, %4
+  %7 = select <16 x i1> %5, <16 x i32> %6, <16 x i32> %4
+  %8 = trunc <16 x i32> %7 to <16 x i8>
+  %9 = getelementptr inbounds i8, ptr %z, i32 %index
+  store <16 x i8> %8, ptr %9, align 1
   %index.next = add i32 %index, 16
-  %13 = icmp eq i32 %index.next, 1024
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vabd_loop_s16(i16* nocapture readonly %x, i16* nocapture readonly %y, i16* noalias nocapture %z, i32 %n) {
+define void @vabd_loop_s16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vabd_loop_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -348,31 +345,28 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %x, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = sext <8 x i16> %wide.load to <8 x i32>
-  %3 = getelementptr inbounds i16, i16* %y, i32 %index
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.load22 = load <8 x i16>, <8 x i16>* %4, align 2
-  %5 = sext <8 x i16> %wide.load22 to <8 x i32>
-  %6 = sub nsw <8 x i32> %2, %5
-  %7 = icmp slt <8 x i32> %6, zeroinitializer
-  %8 = sub nsw <8 x i32> zeroinitializer, %6
-  %9 = select <8 x i1> %7, <8 x i32> %8, <8 x i32> %6
-  %10 = trunc <8 x i32> %9 to <8 x i16>
-  %11 = getelementptr inbounds i16, i16* %z, i32 %index
-  %12 = bitcast i16* %11 to <8 x i16>*
-  store <8 x i16> %10, <8 x i16>* %12, align 2
+  %0 = getelementptr inbounds i16, ptr %x, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = sext <8 x i16> %wide.load to <8 x i32>
+  %2 = getelementptr inbounds i16, ptr %y, i32 %index
+  %wide.load22 = load <8 x i16>, ptr %2, align 2
+  %3 = sext <8 x i16> %wide.load22 to <8 x i32>
+  %4 = sub nsw <8 x i32> %1, %3
+  %5 = icmp slt <8 x i32> %4, zeroinitializer
+  %6 = sub nsw <8 x i32> zeroinitializer, %4
+  %7 = select <8 x i1> %5, <8 x i32> %6, <8 x i32> %4
+  %8 = trunc <8 x i32> %7 to <8 x i16>
+  %9 = getelementptr inbounds i16, ptr %z, i32 %index
+  store <8 x i16> %8, ptr %9, align 2
   %index.next = add i32 %index, 8
-  %13 = icmp eq i32 %index.next, 1024
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vabd_loop_s32(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* noalias nocapture %z, i32 %n) {
+define void @vabd_loop_s32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vabd_loop_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
@@ -432,31 +426,28 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = sext <4 x i32> %wide.load to <4 x i64>
-  %3 = getelementptr inbounds i32, i32* %y, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.load23 = load <4 x i32>, <4 x i32>* %4, align 4
-  %5 = sext <4 x i32> %wide.load23 to <4 x i64>
-  %6 = sub nsw <4 x i64> %2, %5
-  %7 = icmp slt <4 x i64> %6, zeroinitializer
-  %8 = trunc <4 x i64> %6 to <4 x i32>
-  %9 = sub <4 x i32> zeroinitializer, %8
-  %10 = select <4 x i1> %7, <4 x i32> %9, <4 x i32> %8
-  %11 = getelementptr inbounds i32, i32* %z, i32 %index
-  %12 = bitcast i32* %11 to <4 x i32>*
-  store <4 x i32> %10, <4 x i32>* %12, align 4
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = sext <4 x i32> %wide.load to <4 x i64>
+  %2 = getelementptr inbounds i32, ptr %y, i32 %index
+  %wide.load23 = load <4 x i32>, ptr %2, align 4
+  %3 = sext <4 x i32> %wide.load23 to <4 x i64>
+  %4 = sub nsw <4 x i64> %1, %3
+  %5 = icmp slt <4 x i64> %4, zeroinitializer
+  %6 = trunc <4 x i64> %4 to <4 x i32>
+  %7 = sub <4 x i32> zeroinitializer, %6
+  %8 = select <4 x i1> %5, <4 x i32> %7, <4 x i32> %6
+  %9 = getelementptr inbounds i32, ptr %z, i32 %index
+  store <4 x i32> %8, ptr %9, align 4
   %index.next = add i32 %index, 4
-  %13 = icmp eq i32 %index.next, 1024
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vabd_loop_u8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8* noalias nocapture %z, i32 %n) {
+define void @vabd_loop_u8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vabd_loop_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -476,31 +467,28 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %x, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = zext <16 x i8> %wide.load to <16 x i32>
-  %3 = getelementptr inbounds i8, i8* %y, i32 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load22 = load <16 x i8>, <16 x i8>* %4, align 1
-  %5 = zext <16 x i8> %wide.load22 to <16 x i32>
-  %6 = sub nsw <16 x i32> %2, %5
-  %7 = icmp slt <16 x i32> %6, zeroinitializer
-  %8 = sub nsw <16 x i32> zeroinitializer, %6
-  %9 = select <16 x i1> %7, <16 x i32> %8, <16 x i32> %6
-  %10 = trunc <16 x i32> %9 to <16 x i8>
-  %11 = getelementptr inbounds i8, i8* %z, i32 %index
-  %12 = bitcast i8* %11 to <16 x i8>*
-  store <16 x i8> %10, <16 x i8>* %12, align 1
+  %0 = getelementptr inbounds i8, ptr %x, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 1
+  %1 = zext <16 x i8> %wide.load to <16 x i32>
+  %2 = getelementptr inbounds i8, ptr %y, i32 %index
+  %wide.load22 = load <16 x i8>, ptr %2, align 1
+  %3 = zext <16 x i8> %wide.load22 to <16 x i32>
+  %4 = sub nsw <16 x i32> %1, %3
+  %5 = icmp slt <16 x i32> %4, zeroinitializer
+  %6 = sub nsw <16 x i32> zeroinitializer, %4
+  %7 = select <16 x i1> %5, <16 x i32> %6, <16 x i32> %4
+  %8 = trunc <16 x i32> %7 to <16 x i8>
+  %9 = getelementptr inbounds i8, ptr %z, i32 %index
+  store <16 x i8> %8, ptr %9, align 1
   %index.next = add i32 %index, 16
-  %13 = icmp eq i32 %index.next, 1024
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vabd_loop_u16(i16* nocapture readonly %x, i16* nocapture readonly %y, i16* noalias nocapture %z, i32 %n) {
+define void @vabd_loop_u16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vabd_loop_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -520,31 +508,28 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %x, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = zext <8 x i16> %wide.load to <8 x i32>
-  %3 = getelementptr inbounds i16, i16* %y, i32 %index
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.load22 = load <8 x i16>, <8 x i16>* %4, align 2
-  %5 = zext <8 x i16> %wide.load22 to <8 x i32>
-  %6 = sub nsw <8 x i32> %2, %5
-  %7 = icmp slt <8 x i32> %6, zeroinitializer
-  %8 = sub nsw <8 x i32> zeroinitializer, %6
-  %9 = select <8 x i1> %7, <8 x i32> %8, <8 x i32> %6
-  %10 = trunc <8 x i32> %9 to <8 x i16>
-  %11 = getelementptr inbounds i16, i16* %z, i32 %index
-  %12 = bitcast i16* %11 to <8 x i16>*
-  store <8 x i16> %10, <8 x i16>* %12, align 2
+  %0 = getelementptr inbounds i16, ptr %x, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = zext <8 x i16> %wide.load to <8 x i32>
+  %2 = getelementptr inbounds i16, ptr %y, i32 %index
+  %wide.load22 = load <8 x i16>, ptr %2, align 2
+  %3 = zext <8 x i16> %wide.load22 to <8 x i32>
+  %4 = sub nsw <8 x i32> %1, %3
+  %5 = icmp slt <8 x i32> %4, zeroinitializer
+  %6 = sub nsw <8 x i32> zeroinitializer, %4
+  %7 = select <8 x i1> %5, <8 x i32> %6, <8 x i32> %4
+  %8 = trunc <8 x i32> %7 to <8 x i16>
+  %9 = getelementptr inbounds i16, ptr %z, i32 %index
+  store <8 x i16> %8, ptr %9, align 2
   %index.next = add i32 %index, 8
-  %13 = icmp eq i32 %index.next, 1024
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vabd_loop_u32(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* noalias nocapture %z, i32 %n) {
+define void @vabd_loop_u32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vabd_loop_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
@@ -608,25 +593,22 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = zext <4 x i32> %wide.load to <4 x i64>
-  %3 = getelementptr inbounds i32, i32* %y, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.load23 = load <4 x i32>, <4 x i32>* %4, align 4
-  %5 = zext <4 x i32> %wide.load23 to <4 x i64>
-  %6 = sub nsw <4 x i64> %2, %5
-  %7 = icmp slt <4 x i64> %6, zeroinitializer
-  %8 = trunc <4 x i64> %6 to <4 x i32>
-  %9 = sub <4 x i32> zeroinitializer, %8
-  %10 = select <4 x i1> %7, <4 x i32> %9, <4 x i32> %8
-  %11 = getelementptr inbounds i32, i32* %z, i32 %index
-  %12 = bitcast i32* %11 to <4 x i32>*
-  store <4 x i32> %10, <4 x i32>* %12, align 4
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = zext <4 x i32> %wide.load to <4 x i64>
+  %2 = getelementptr inbounds i32, ptr %y, i32 %index
+  %wide.load23 = load <4 x i32>, ptr %2, align 4
+  %3 = zext <4 x i32> %wide.load23 to <4 x i64>
+  %4 = sub nsw <4 x i64> %1, %3
+  %5 = icmp slt <4 x i64> %4, zeroinitializer
+  %6 = trunc <4 x i64> %4 to <4 x i32>
+  %7 = sub <4 x i32> zeroinitializer, %6
+  %8 = select <4 x i1> %5, <4 x i32> %7, <4 x i32> %6
+  %9 = getelementptr inbounds i32, ptr %z, i32 %index
+  store <4 x i32> %8, ptr %9, align 4
   %index.next = add i32 %index, 4
-  %13 = icmp eq i32 %index.next, 1024
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void

diff --git a/llvm/test/CodeGen/Thumb2/mve-vctp.ll b/llvm/test/CodeGen/Thumb2/mve-vctp.ll
index 04a82ebb75969..02f7de5a07244 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vctp.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vctp.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve --verify-machineinstrs %s -o - | FileCheck %s
 
-define void @vctp8(i32 %arg, <16 x i8> *%in, <16 x i8>* %out) {
+define void @vctp8(i32 %arg, ptr %in, ptr %out) {
 ; CHECK-LABEL: vctp8:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
@@ -12,13 +12,13 @@ define void @vctp8(i32 %arg, <16 x i8> *%in, <16 x i8>* %out) {
 ; CHECK-NEXT:    vstrw.32 q0, [r2]
 ; CHECK-NEXT:    bx lr
   %pred = call <16 x i1> @llvm.arm.mve.vctp8(i32 %arg)
-  %ld = load <16 x i8>, <16 x i8>* %in
+  %ld = load <16 x i8>, ptr %in
   %res = select <16 x i1> %pred, <16 x i8> %ld, <16 x i8> zeroinitializer
-  store <16 x i8> %res, <16 x i8>* %out
+  store <16 x i8> %res, ptr %out
   ret void
 }
 
-define void @vctp16(i32 %arg, <8 x i16> *%in, <8 x i16>* %out) {
+define void @vctp16(i32 %arg, ptr %in, ptr %out) {
 ; CHECK-LABEL: vctp16:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
@@ -29,13 +29,13 @@ define void @vctp16(i32 %arg, <8 x i16> *%in, <8 x i16>* %out) {
 ; CHECK-NEXT:    vstrw.32 q0, [r2]
 ; CHECK-NEXT:    bx lr
   %pred = call <8 x i1> @llvm.arm.mve.vctp16(i32 %arg)
-  %ld = load <8 x i16>, <8 x i16>* %in
+  %ld = load <8 x i16>, ptr %in
   %res = select <8 x i1> %pred, <8 x i16> %ld, <8 x i16> zeroinitializer
-  store <8 x i16> %res, <8 x i16>* %out
+  store <8 x i16> %res, ptr %out
   ret void
 }
 
-define void @vctp32(i32 %arg, <4 x i32> *%in, <4 x i32>* %out) {
+define void @vctp32(i32 %arg, ptr %in, ptr %out) {
 ; CHECK-LABEL: vctp32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
@@ -46,13 +46,13 @@ define void @vctp32(i32 %arg, <4 x i32> *%in, <4 x i32>* %out) {
 ; CHECK-NEXT:    vstrw.32 q0, [r2]
 ; CHECK-NEXT:    bx lr
   %pred = call <4 x i1> @llvm.arm.mve.vctp32(i32 %arg)
-  %ld = load <4 x i32>, <4 x i32>* %in
+  %ld = load <4 x i32>, ptr %in
   %res = select <4 x i1> %pred, <4 x i32> %ld, <4 x i32> zeroinitializer
-  store <4 x i32> %res, <4 x i32>* %out
+  store <4 x i32> %res, ptr %out
   ret void
 }
 
-define void @vctp64(i32 %arg, <2 x i64> *%in, <2 x i64>* %out) {
+define void @vctp64(i32 %arg, ptr %in, ptr %out) {
 ; CHECK-LABEL: vctp64:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
@@ -63,9 +63,9 @@ define void @vctp64(i32 %arg, <2 x i64> *%in, <2 x i64>* %out) {
 ; CHECK-NEXT:    vstrw.32 q0, [r2]
 ; CHECK-NEXT:    bx lr
   %pred = call <2 x i1> @llvm.arm.mve.vctp64(i32 %arg)
-  %ld = load <2 x i64>, <2 x i64>* %in
+  %ld = load <2 x i64>, ptr %in
   %res = select <2 x i1> %pred, <2 x i64> %ld, <2 x i64> zeroinitializer
-  store <2 x i64> %res, <2 x i64>* %out
+  store <2 x i64> %res, ptr %out
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Thumb2/mve-vcvt16.ll b/llvm/test/CodeGen/Thumb2/mve-vcvt16.ll
index 6ab9c80aba3ec..9711a5c5aef87 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vcvt16.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vcvt16.ll
@@ -181,19 +181,19 @@ entry:
 
 
 
-define arm_aapcs_vfpcc <4 x float> @load_ext_4(<4 x half>* %src) {
+define arm_aapcs_vfpcc <4 x float> @load_ext_4(ptr %src) {
 ; CHECK-LABEL: load_ext_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0]
 ; CHECK-NEXT:    vcvtb.f32.f16 q0, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <4 x half>, <4 x half>* %src, align 4
+  %wide.load = load <4 x half>, ptr %src, align 4
   %e = fpext <4 x half> %wide.load to <4 x float>
   ret <4 x float> %e
 }
 
-define arm_aapcs_vfpcc <8 x float> @load_ext_8(<8 x half>* %src) {
+define arm_aapcs_vfpcc <8 x float> @load_ext_8(ptr %src) {
 ; CHECK-LABEL: load_ext_8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0]
@@ -202,12 +202,12 @@ define arm_aapcs_vfpcc <8 x float> @load_ext_8(<8 x half>* %src) {
 ; CHECK-NEXT:    vcvtb.f32.f16 q1, q1
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <8 x half>, <8 x half>* %src, align 4
+  %wide.load = load <8 x half>, ptr %src, align 4
   %e = fpext <8 x half> %wide.load to <8 x float>
   ret <8 x float> %e
 }
 
-define arm_aapcs_vfpcc <16 x float> @load_ext_16(<16 x half>* %src) {
+define arm_aapcs_vfpcc <16 x float> @load_ext_16(ptr %src) {
 ; CHECK-LABEL: load_ext_16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0]
@@ -220,25 +220,25 @@ define arm_aapcs_vfpcc <16 x float> @load_ext_16(<16 x half>* %src) {
 ; CHECK-NEXT:    vcvtb.f32.f16 q3, q3
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <16 x half>, <16 x half>* %src, align 4
+  %wide.load = load <16 x half>, ptr %src, align 4
   %e = fpext <16 x half> %wide.load to <16 x float>
   ret <16 x float> %e
 }
 
-define arm_aapcs_vfpcc <4 x float> @load_shuffleext_8(<8 x half>* %src) {
+define arm_aapcs_vfpcc <4 x float> @load_shuffleext_8(ptr %src) {
 ; CHECK-LABEL: load_shuffleext_8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    vcvtb.f32.f16 q0, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <8 x half>, <8 x half>* %src, align 4
+  %wide.load = load <8 x half>, ptr %src, align 4
   %sh = shufflevector <8 x half> %wide.load, <8 x half> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %e = fpext <4 x half> %sh to <4 x float>
   ret <4 x float> %e
 }
 
-define arm_aapcs_vfpcc <8 x float> @load_shuffleext_16(<16 x half>* %src) {
+define arm_aapcs_vfpcc <8 x float> @load_shuffleext_16(ptr %src) {
 ; CHECK-LABEL: load_shuffleext_16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.16 {q2, q3}, [r0]
@@ -253,7 +253,7 @@ define arm_aapcs_vfpcc <8 x float> @load_shuffleext_16(<16 x half>* %src) {
 ; CHECK-NEXT:    vcvtb.f32.f16 s4, s10
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <16 x half>, <16 x half>* %src, align 4
+  %wide.load = load <16 x half>, ptr %src, align 4
   %sh = shufflevector <16 x half> %wide.load, <16 x half> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %e = fpext <8 x half> %sh to <8 x float>
   ret <8 x float> %e
@@ -262,7 +262,7 @@ entry:
 
 
 
-define arm_aapcs_vfpcc void @store_trunc_4(<4 x half>* %src, <4 x float> %val) {
+define arm_aapcs_vfpcc void @store_trunc_4(ptr %src, <4 x float> %val) {
 ; CHECK-LABEL: store_trunc_4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vcvtb.f16.f32 q0, q0
@@ -270,11 +270,11 @@ define arm_aapcs_vfpcc void @store_trunc_4(<4 x half>* %src, <4 x float> %val) {
 ; CHECK-NEXT:    bx lr
 entry:
   %e = fptrunc <4 x float> %val to <4 x half>
-  store <4 x half> %e, <4 x half>* %src, align 4
+  store <4 x half> %e, ptr %src, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @store_trunc_8(<8 x half>* %src, <8 x float> %val) {
+define arm_aapcs_vfpcc void @store_trunc_8(ptr %src, <8 x float> %val) {
 ; CHECK-LABEL: store_trunc_8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vcvtb.f16.f32 q1, q1
@@ -284,11 +284,11 @@ define arm_aapcs_vfpcc void @store_trunc_8(<8 x half>* %src, <8 x float> %val) {
 ; CHECK-NEXT:    bx lr
 entry:
   %e = fptrunc <8 x float> %val to <8 x half>
-  store <8 x half> %e, <8 x half>* %src, align 4
+  store <8 x half> %e, ptr %src, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @store_trunc_16(<16 x half>* %src, <16 x float> %val) {
+define arm_aapcs_vfpcc void @store_trunc_16(ptr %src, <16 x float> %val) {
 ; CHECK-LABEL: store_trunc_16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vcvtb.f16.f32 q3, q3
@@ -302,11 +302,11 @@ define arm_aapcs_vfpcc void @store_trunc_16(<16 x half>* %src, <16 x float> %val
 ; CHECK-NEXT:    bx lr
 entry:
   %e = fptrunc <16 x float> %val to <16 x half>
-  store <16 x half> %e, <16 x half>* %src, align 4
+  store <16 x half> %e, ptr %src, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @store_shuffletrunc_8(<8 x half>* %src, <4 x float> %val1, <4 x float> %val2) {
+define arm_aapcs_vfpcc void @store_shuffletrunc_8(ptr %src, <4 x float> %val1, <4 x float> %val2) {
 ; CHECK-LABEL: store_shuffletrunc_8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vcvtb.f16.f32 q0, q0
@@ -316,11 +316,11 @@ define arm_aapcs_vfpcc void @store_shuffletrunc_8(<8 x half>* %src, <4 x float>
 entry:
   %strided.vec = shufflevector <4 x float> %val1, <4 x float> %val2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
   %out = fptrunc <8 x float> %strided.vec to <8 x half>
-  store <8 x half> %out, <8 x half>* %src, align 4
+  store <8 x half> %out, ptr %src, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @store_shuffletrunc_16(<16 x half>* %src, <8 x float> %val1, <8 x float> %val2) {
+define arm_aapcs_vfpcc void @store_shuffletrunc_16(ptr %src, <8 x float> %val1, <8 x float> %val2) {
 ; CHECK-LABEL: store_shuffletrunc_16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vcvtb.f16.f32 q1, q1
@@ -333,6 +333,6 @@ define arm_aapcs_vfpcc void @store_shuffletrunc_16(<16 x half>* %src, <8 x float
 entry:
   %strided.vec = shufflevector <8 x float> %val1, <8 x float> %val2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
   %out = fptrunc <16 x float> %strided.vec to <16 x half>
-  store <16 x half> %out, <16 x half>* %src, align 4
+  store <16 x half> %out, ptr %src, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
index 0ce5474c85198..2dec589448d2d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
@@ -631,7 +631,7 @@ entry:
   ret i64 %z
 }
 
-define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext_load(<16 x i8> *%xp, <16 x i8> *%yp) {
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext_load(ptr %xp, ptr %yp) {
 ; CHECK-LABEL: add_v16i8_v16i64_zext_load:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
@@ -644,8 +644,8 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext_load(<16 x i8> *%xp, <16 x i8>
 ; CHECK-NEXT:    mov r1, r3
 ; CHECK-NEXT:    bx lr
 entry:
-  %x = load <16 x i8>, <16 x i8>* %xp
-  %y = load <16 x i8>, <16 x i8>* %yp
+  %x = load <16 x i8>, ptr %xp
+  %y = load <16 x i8>, ptr %yp
   %xx = zext <16 x i8> %x to <16 x i64>
   %yy = zext <16 x i8> %y to <16 x i64>
   %m = mul <16 x i64> %xx, %yy
@@ -653,7 +653,7 @@ entry:
   ret i64 %z
 }
 
-define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_sext_load(<16 x i8> *%xp, <16 x i8> *%yp) {
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_sext_load(ptr %xp, ptr %yp) {
 ; CHECK-LABEL: add_v16i8_v16i64_sext_load:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r1]
@@ -666,8 +666,8 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_sext_load(<16 x i8> *%xp, <16 x i8>
 ; CHECK-NEXT:    mov r1, r3
 ; CHECK-NEXT:    bx lr
 entry:
-  %x = load <16 x i8>, <16 x i8>* %xp
-  %y = load <16 x i8>, <16 x i8>* %yp
+  %x = load <16 x i8>, ptr %xp
+  %y = load <16 x i8>, ptr %yp
   %xx = sext <16 x i8> %x to <16 x i64>
   %yy = sext <16 x i8> %y to <16 x i64>
   %m = mul <16 x i64> %xx, %yy
@@ -1391,7 +1391,7 @@ entry:
   ret i64 %r
 }
 
-define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext_load(<16 x i8> *%xp, <16 x i8> *%yp, i64 %a) {
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext_load(ptr %xp, ptr %yp, i64 %a) {
 ; CHECK-LABEL: add_v16i8_v16i64_acc_zext_load:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
@@ -1404,8 +1404,8 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext_load(<16 x i8> *%xp, <16 x
 ; CHECK-NEXT:    mov r1, r3
 ; CHECK-NEXT:    bx lr
 entry:
-  %x = load <16 x i8>, <16 x i8>* %xp
-  %y = load <16 x i8>, <16 x i8>* %yp
+  %x = load <16 x i8>, ptr %xp
+  %y = load <16 x i8>, ptr %yp
   %xx = zext <16 x i8> %x to <16 x i64>
   %yy = zext <16 x i8> %y to <16 x i64>
   %m = mul <16 x i64> %xx, %yy
@@ -1414,7 +1414,7 @@ entry:
   ret i64 %r
 }
 
-define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_sext_load(<16 x i8> *%xp, <16 x i8> *%yp, i64 %a) {
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_sext_load(ptr %xp, ptr %yp, i64 %a) {
 ; CHECK-LABEL: add_v16i8_v16i64_acc_sext_load:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r1]
@@ -1427,8 +1427,8 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_sext_load(<16 x i8> *%xp, <16 x
 ; CHECK-NEXT:    mov r1, r3
 ; CHECK-NEXT:    bx lr
 entry:
-  %x = load <16 x i8>, <16 x i8>* %xp
-  %y = load <16 x i8>, <16 x i8>* %yp
+  %x = load <16 x i8>, ptr %xp
+  %y = load <16 x i8>, ptr %yp
   %xx = sext <16 x i8> %x to <16 x i64>
   %yy = sext <16 x i8> %y to <16 x i64>
   %m = mul <16 x i64> %xx, %yy

diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-slp.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-slp.ll
index 465457bee0128..78693d55e1a6a 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-slp.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-slp.ll
@@ -4,34 +4,33 @@
 ; Various reductions generated from SLP vectorizing unrolled loops. Generated
 ; from https://godbolt.org/z/ebxdPh1Kz with some less interesting cases removed.
 
-define i32 @addv2i32i32(i32* %x) {
+define i32 @addv2i32i32(ptr %x) {
 ; CHECK-LABEL: addv2i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrd r1, r0, [r0]
 ; CHECK-NEXT:    add r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %x, align 4
-  %arrayidx.1 = getelementptr inbounds i32, i32* %x, i32 1
-  %1 = load i32, i32* %arrayidx.1, align 4
+  %0 = load i32, ptr %x, align 4
+  %arrayidx.1 = getelementptr inbounds i32, ptr %x, i32 1
+  %1 = load i32, ptr %arrayidx.1, align 4
   %add.1 = add nsw i32 %1, %0
   ret i32 %add.1
 }
 
-define i32 @addv4i32i32(i32* %x) {
+define i32 @addv4i32i32(ptr %x) {
 ; CHECK-LABEL: addv4i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    vaddv.u32 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
-  ret i32 %2
+  %0 = load <4 x i32>, ptr %x, align 4
+  %1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %0)
+  ret i32 %1
 }
 
-define i32 @addv8i32i32(i32* %x) {
+define i32 @addv8i32i32(ptr %x) {
 ; CHECK-LABEL: addv8i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -40,13 +39,12 @@ define i32 @addv8i32i32(i32* %x) {
 ; CHECK-NEXT:    vaddva.u32 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <8 x i32>*
-  %1 = load <8 x i32>, <8 x i32>* %0, align 4
-  %2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %1)
-  ret i32 %2
+  %0 = load <8 x i32>, ptr %x, align 4
+  %1 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %0)
+  ret i32 %1
 }
 
-define i32 @addv16i32i32(i32* %x) {
+define i32 @addv16i32i32(ptr %x) {
 ; CHECK-LABEL: addv16i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -60,13 +58,12 @@ define i32 @addv16i32i32(i32* %x) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <16 x i32>*
-  %1 = load <16 x i32>, <16 x i32>* %0, align 4
-  %2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
-  ret i32 %2
+  %0 = load <16 x i32>, ptr %x, align 4
+  %1 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %0)
+  ret i32 %1
 }
 
-define i32 @addv24i32i32(i32* %x) {
+define i32 @addv24i32i32(ptr %x) {
 ; CHECK-LABEL: addv24i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -84,18 +81,16 @@ define i32 @addv24i32i32(i32* %x) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <8 x i32>*
-  %1 = load <8 x i32>, <8 x i32>* %0, align 4
-  %arrayidx.8 = getelementptr inbounds i32, i32* %x, i32 8
-  %2 = bitcast i32* %arrayidx.8 to <16 x i32>*
-  %3 = load <16 x i32>, <16 x i32>* %2, align 4
-  %4 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %3)
-  %5 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %1)
-  %op.rdx = add nsw i32 %4, %5
+  %0 = load <8 x i32>, ptr %x, align 4
+  %arrayidx.8 = getelementptr inbounds i32, ptr %x, i32 8
+  %1 = load <16 x i32>, ptr %arrayidx.8, align 4
+  %2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
+  %3 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %0)
+  %op.rdx = add nsw i32 %2, %3
   ret i32 %op.rdx
 }
 
-define i32 @addv32i32i32(i32* %x) {
+define i32 @addv32i32i32(ptr %x) {
 ; CHECK-LABEL: addv32i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -117,13 +112,12 @@ define i32 @addv32i32i32(i32* %x) {
 ; CHECK-NEXT:    vaddva.u32 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <32 x i32>*
-  %1 = load <32 x i32>, <32 x i32>* %0, align 4
-  %2 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %1)
-  ret i32 %2
+  %0 = load <32 x i32>, ptr %x, align 4
+  %1 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %0)
+  ret i32 %1
 }
 
-define i32 @addv64i32i32(i32* %x) {
+define i32 @addv64i32i32(ptr %x) {
 ; CHECK-LABEL: addv64i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -161,13 +155,12 @@ define i32 @addv64i32i32(i32* %x) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <64 x i32>*
-  %1 = load <64 x i32>, <64 x i32>* %0, align 4
-  %2 = call i32 @llvm.vector.reduce.add.v64i32(<64 x i32> %1)
-  ret i32 %2
+  %0 = load <64 x i32>, ptr %x, align 4
+  %1 = call i32 @llvm.vector.reduce.add.v64i32(<64 x i32> %0)
+  ret i32 %1
 }
 
-define i32 @addv128i32i32(i32* %x) {
+define i32 @addv128i32i32(ptr %x) {
 ; CHECK-LABEL: addv128i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -237,168 +230,136 @@ define i32 @addv128i32i32(i32* %x) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %0, align 4
-  %1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load)
-  %2 = getelementptr inbounds i32, i32* %x, i32 4
-  %3 = bitcast i32* %2 to <4 x i32>*
-  %wide.load.1 = load <4 x i32>, <4 x i32>* %3, align 4
-  %4 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.1)
-  %5 = add i32 %4, %1
-  %6 = getelementptr inbounds i32, i32* %x, i32 8
-  %7 = bitcast i32* %6 to <4 x i32>*
-  %wide.load.2 = load <4 x i32>, <4 x i32>* %7, align 4
-  %8 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.2)
-  %9 = add i32 %8, %5
-  %10 = getelementptr inbounds i32, i32* %x, i32 12
-  %11 = bitcast i32* %10 to <4 x i32>*
-  %wide.load.3 = load <4 x i32>, <4 x i32>* %11, align 4
-  %12 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.3)
-  %13 = add i32 %12, %9
-  %14 = getelementptr inbounds i32, i32* %x, i32 16
-  %15 = bitcast i32* %14 to <4 x i32>*
-  %wide.load.4 = load <4 x i32>, <4 x i32>* %15, align 4
-  %16 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.4)
-  %17 = add i32 %16, %13
-  %18 = getelementptr inbounds i32, i32* %x, i32 20
-  %19 = bitcast i32* %18 to <4 x i32>*
-  %wide.load.5 = load <4 x i32>, <4 x i32>* %19, align 4
-  %20 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.5)
-  %21 = add i32 %20, %17
-  %22 = getelementptr inbounds i32, i32* %x, i32 24
-  %23 = bitcast i32* %22 to <4 x i32>*
-  %wide.load.6 = load <4 x i32>, <4 x i32>* %23, align 4
-  %24 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.6)
-  %25 = add i32 %24, %21
-  %26 = getelementptr inbounds i32, i32* %x, i32 28
-  %27 = bitcast i32* %26 to <4 x i32>*
-  %wide.load.7 = load <4 x i32>, <4 x i32>* %27, align 4
-  %28 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.7)
-  %29 = add i32 %28, %25
-  %30 = getelementptr inbounds i32, i32* %x, i32 32
-  %31 = bitcast i32* %30 to <4 x i32>*
-  %wide.load.8 = load <4 x i32>, <4 x i32>* %31, align 4
-  %32 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.8)
-  %33 = add i32 %32, %29
-  %34 = getelementptr inbounds i32, i32* %x, i32 36
-  %35 = bitcast i32* %34 to <4 x i32>*
-  %wide.load.9 = load <4 x i32>, <4 x i32>* %35, align 4
-  %36 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.9)
-  %37 = add i32 %36, %33
-  %38 = getelementptr inbounds i32, i32* %x, i32 40
-  %39 = bitcast i32* %38 to <4 x i32>*
-  %wide.load.10 = load <4 x i32>, <4 x i32>* %39, align 4
-  %40 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.10)
-  %41 = add i32 %40, %37
-  %42 = getelementptr inbounds i32, i32* %x, i32 44
-  %43 = bitcast i32* %42 to <4 x i32>*
-  %wide.load.11 = load <4 x i32>, <4 x i32>* %43, align 4
-  %44 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.11)
-  %45 = add i32 %44, %41
-  %46 = getelementptr inbounds i32, i32* %x, i32 48
-  %47 = bitcast i32* %46 to <4 x i32>*
-  %wide.load.12 = load <4 x i32>, <4 x i32>* %47, align 4
-  %48 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.12)
-  %49 = add i32 %48, %45
-  %50 = getelementptr inbounds i32, i32* %x, i32 52
-  %51 = bitcast i32* %50 to <4 x i32>*
-  %wide.load.13 = load <4 x i32>, <4 x i32>* %51, align 4
-  %52 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.13)
-  %53 = add i32 %52, %49
-  %54 = getelementptr inbounds i32, i32* %x, i32 56
-  %55 = bitcast i32* %54 to <4 x i32>*
-  %wide.load.14 = load <4 x i32>, <4 x i32>* %55, align 4
-  %56 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.14)
-  %57 = add i32 %56, %53
-  %58 = getelementptr inbounds i32, i32* %x, i32 60
-  %59 = bitcast i32* %58 to <4 x i32>*
-  %wide.load.15 = load <4 x i32>, <4 x i32>* %59, align 4
-  %60 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.15)
-  %61 = add i32 %60, %57
-  %62 = getelementptr inbounds i32, i32* %x, i32 64
-  %63 = bitcast i32* %62 to <4 x i32>*
-  %wide.load.16 = load <4 x i32>, <4 x i32>* %63, align 4
-  %64 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.16)
-  %65 = add i32 %64, %61
-  %66 = getelementptr inbounds i32, i32* %x, i32 68
-  %67 = bitcast i32* %66 to <4 x i32>*
-  %wide.load.17 = load <4 x i32>, <4 x i32>* %67, align 4
-  %68 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.17)
-  %69 = add i32 %68, %65
-  %70 = getelementptr inbounds i32, i32* %x, i32 72
-  %71 = bitcast i32* %70 to <4 x i32>*
-  %wide.load.18 = load <4 x i32>, <4 x i32>* %71, align 4
-  %72 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.18)
-  %73 = add i32 %72, %69
-  %74 = getelementptr inbounds i32, i32* %x, i32 76
-  %75 = bitcast i32* %74 to <4 x i32>*
-  %wide.load.19 = load <4 x i32>, <4 x i32>* %75, align 4
-  %76 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.19)
-  %77 = add i32 %76, %73
-  %78 = getelementptr inbounds i32, i32* %x, i32 80
-  %79 = bitcast i32* %78 to <4 x i32>*
-  %wide.load.20 = load <4 x i32>, <4 x i32>* %79, align 4
-  %80 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.20)
-  %81 = add i32 %80, %77
-  %82 = getelementptr inbounds i32, i32* %x, i32 84
-  %83 = bitcast i32* %82 to <4 x i32>*
-  %wide.load.21 = load <4 x i32>, <4 x i32>* %83, align 4
-  %84 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.21)
-  %85 = add i32 %84, %81
-  %86 = getelementptr inbounds i32, i32* %x, i32 88
-  %87 = bitcast i32* %86 to <4 x i32>*
-  %wide.load.22 = load <4 x i32>, <4 x i32>* %87, align 4
-  %88 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.22)
-  %89 = add i32 %88, %85
-  %90 = getelementptr inbounds i32, i32* %x, i32 92
-  %91 = bitcast i32* %90 to <4 x i32>*
-  %wide.load.23 = load <4 x i32>, <4 x i32>* %91, align 4
-  %92 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.23)
-  %93 = add i32 %92, %89
-  %94 = getelementptr inbounds i32, i32* %x, i32 96
-  %95 = bitcast i32* %94 to <4 x i32>*
-  %wide.load.24 = load <4 x i32>, <4 x i32>* %95, align 4
-  %96 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.24)
-  %97 = add i32 %96, %93
-  %98 = getelementptr inbounds i32, i32* %x, i32 100
-  %99 = bitcast i32* %98 to <4 x i32>*
-  %wide.load.25 = load <4 x i32>, <4 x i32>* %99, align 4
-  %100 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.25)
-  %101 = add i32 %100, %97
-  %102 = getelementptr inbounds i32, i32* %x, i32 104
-  %103 = bitcast i32* %102 to <4 x i32>*
-  %wide.load.26 = load <4 x i32>, <4 x i32>* %103, align 4
-  %104 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.26)
-  %105 = add i32 %104, %101
-  %106 = getelementptr inbounds i32, i32* %x, i32 108
-  %107 = bitcast i32* %106 to <4 x i32>*
-  %wide.load.27 = load <4 x i32>, <4 x i32>* %107, align 4
-  %108 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.27)
-  %109 = add i32 %108, %105
-  %110 = getelementptr inbounds i32, i32* %x, i32 112
-  %111 = bitcast i32* %110 to <4 x i32>*
-  %wide.load.28 = load <4 x i32>, <4 x i32>* %111, align 4
-  %112 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.28)
-  %113 = add i32 %112, %109
-  %114 = getelementptr inbounds i32, i32* %x, i32 116
-  %115 = bitcast i32* %114 to <4 x i32>*
-  %wide.load.29 = load <4 x i32>, <4 x i32>* %115, align 4
-  %116 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.29)
-  %117 = add i32 %116, %113
-  %118 = getelementptr inbounds i32, i32* %x, i32 120
-  %119 = bitcast i32* %118 to <4 x i32>*
-  %wide.load.30 = load <4 x i32>, <4 x i32>* %119, align 4
-  %120 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.30)
-  %121 = add i32 %120, %117
-  %122 = getelementptr inbounds i32, i32* %x, i32 124
-  %123 = bitcast i32* %122 to <4 x i32>*
-  %wide.load.31 = load <4 x i32>, <4 x i32>* %123, align 4
-  %124 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.31)
-  %125 = add i32 %124, %121
-  ret i32 %125
-}
-
-define i32 @addv2i32i16(i16* %x) {
+  %wide.load = load <4 x i32>, ptr %x, align 4
+  %0 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load)
+  %1 = getelementptr inbounds i32, ptr %x, i32 4
+  %wide.load.1 = load <4 x i32>, ptr %1, align 4
+  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.1)
+  %3 = add i32 %2, %0
+  %4 = getelementptr inbounds i32, ptr %x, i32 8
+  %wide.load.2 = load <4 x i32>, ptr %4, align 4
+  %5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.2)
+  %6 = add i32 %5, %3
+  %7 = getelementptr inbounds i32, ptr %x, i32 12
+  %wide.load.3 = load <4 x i32>, ptr %7, align 4
+  %8 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.3)
+  %9 = add i32 %8, %6
+  %10 = getelementptr inbounds i32, ptr %x, i32 16
+  %wide.load.4 = load <4 x i32>, ptr %10, align 4
+  %11 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.4)
+  %12 = add i32 %11, %9
+  %13 = getelementptr inbounds i32, ptr %x, i32 20
+  %wide.load.5 = load <4 x i32>, ptr %13, align 4
+  %14 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.5)
+  %15 = add i32 %14, %12
+  %16 = getelementptr inbounds i32, ptr %x, i32 24
+  %wide.load.6 = load <4 x i32>, ptr %16, align 4
+  %17 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.6)
+  %18 = add i32 %17, %15
+  %19 = getelementptr inbounds i32, ptr %x, i32 28
+  %wide.load.7 = load <4 x i32>, ptr %19, align 4
+  %20 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.7)
+  %21 = add i32 %20, %18
+  %22 = getelementptr inbounds i32, ptr %x, i32 32
+  %wide.load.8 = load <4 x i32>, ptr %22, align 4
+  %23 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.8)
+  %24 = add i32 %23, %21
+  %25 = getelementptr inbounds i32, ptr %x, i32 36
+  %wide.load.9 = load <4 x i32>, ptr %25, align 4
+  %26 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.9)
+  %27 = add i32 %26, %24
+  %28 = getelementptr inbounds i32, ptr %x, i32 40
+  %wide.load.10 = load <4 x i32>, ptr %28, align 4
+  %29 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.10)
+  %30 = add i32 %29, %27
+  %31 = getelementptr inbounds i32, ptr %x, i32 44
+  %wide.load.11 = load <4 x i32>, ptr %31, align 4
+  %32 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.11)
+  %33 = add i32 %32, %30
+  %34 = getelementptr inbounds i32, ptr %x, i32 48
+  %wide.load.12 = load <4 x i32>, ptr %34, align 4
+  %35 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.12)
+  %36 = add i32 %35, %33
+  %37 = getelementptr inbounds i32, ptr %x, i32 52
+  %wide.load.13 = load <4 x i32>, ptr %37, align 4
+  %38 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.13)
+  %39 = add i32 %38, %36
+  %40 = getelementptr inbounds i32, ptr %x, i32 56
+  %wide.load.14 = load <4 x i32>, ptr %40, align 4
+  %41 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.14)
+  %42 = add i32 %41, %39
+  %43 = getelementptr inbounds i32, ptr %x, i32 60
+  %wide.load.15 = load <4 x i32>, ptr %43, align 4
+  %44 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.15)
+  %45 = add i32 %44, %42
+  %46 = getelementptr inbounds i32, ptr %x, i32 64
+  %wide.load.16 = load <4 x i32>, ptr %46, align 4
+  %47 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.16)
+  %48 = add i32 %47, %45
+  %49 = getelementptr inbounds i32, ptr %x, i32 68
+  %wide.load.17 = load <4 x i32>, ptr %49, align 4
+  %50 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.17)
+  %51 = add i32 %50, %48
+  %52 = getelementptr inbounds i32, ptr %x, i32 72
+  %wide.load.18 = load <4 x i32>, ptr %52, align 4
+  %53 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.18)
+  %54 = add i32 %53, %51
+  %55 = getelementptr inbounds i32, ptr %x, i32 76
+  %wide.load.19 = load <4 x i32>, ptr %55, align 4
+  %56 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.19)
+  %57 = add i32 %56, %54
+  %58 = getelementptr inbounds i32, ptr %x, i32 80
+  %wide.load.20 = load <4 x i32>, ptr %58, align 4
+  %59 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.20)
+  %60 = add i32 %59, %57
+  %61 = getelementptr inbounds i32, ptr %x, i32 84
+  %wide.load.21 = load <4 x i32>, ptr %61, align 4
+  %62 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.21)
+  %63 = add i32 %62, %60
+  %64 = getelementptr inbounds i32, ptr %x, i32 88
+  %wide.load.22 = load <4 x i32>, ptr %64, align 4
+  %65 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.22)
+  %66 = add i32 %65, %63
+  %67 = getelementptr inbounds i32, ptr %x, i32 92
+  %wide.load.23 = load <4 x i32>, ptr %67, align 4
+  %68 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.23)
+  %69 = add i32 %68, %66
+  %70 = getelementptr inbounds i32, ptr %x, i32 96
+  %wide.load.24 = load <4 x i32>, ptr %70, align 4
+  %71 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.24)
+  %72 = add i32 %71, %69
+  %73 = getelementptr inbounds i32, ptr %x, i32 100
+  %wide.load.25 = load <4 x i32>, ptr %73, align 4
+  %74 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.25)
+  %75 = add i32 %74, %72
+  %76 = getelementptr inbounds i32, ptr %x, i32 104
+  %wide.load.26 = load <4 x i32>, ptr %76, align 4
+  %77 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.26)
+  %78 = add i32 %77, %75
+  %79 = getelementptr inbounds i32, ptr %x, i32 108
+  %wide.load.27 = load <4 x i32>, ptr %79, align 4
+  %80 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.27)
+  %81 = add i32 %80, %78
+  %82 = getelementptr inbounds i32, ptr %x, i32 112
+  %wide.load.28 = load <4 x i32>, ptr %82, align 4
+  %83 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.28)
+  %84 = add i32 %83, %81
+  %85 = getelementptr inbounds i32, ptr %x, i32 116
+  %wide.load.29 = load <4 x i32>, ptr %85, align 4
+  %86 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.29)
+  %87 = add i32 %86, %84
+  %88 = getelementptr inbounds i32, ptr %x, i32 120
+  %wide.load.30 = load <4 x i32>, ptr %88, align 4
+  %89 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.30)
+  %90 = add i32 %89, %87
+  %91 = getelementptr inbounds i32, ptr %x, i32 124
+  %wide.load.31 = load <4 x i32>, ptr %91, align 4
+  %92 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load.31)
+  %93 = add i32 %92, %90
+  ret i32 %93
+}
+
+define i32 @addv2i32i16(ptr %x) {
 ; CHECK-LABEL: addv2i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrsh.w r1, [r0]
@@ -406,44 +367,42 @@ define i32 @addv2i32i16(i16* %x) {
 ; CHECK-NEXT:    add r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i16, i16* %x, align 2
+  %0 = load i16, ptr %x, align 2
   %conv = sext i16 %0 to i32
-  %arrayidx.1 = getelementptr inbounds i16, i16* %x, i32 1
-  %1 = load i16, i16* %arrayidx.1, align 2
+  %arrayidx.1 = getelementptr inbounds i16, ptr %x, i32 1
+  %1 = load i16, ptr %arrayidx.1, align 2
   %conv.1 = sext i16 %1 to i32
   %add.1 = add nsw i32 %conv, %conv.1
   ret i32 %add.1
 }
 
-define i32 @addv4i32i16(i16* %x) {
+define i32 @addv4i32i16(ptr %x) {
 ; CHECK-LABEL: addv4i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0]
 ; CHECK-NEXT:    vaddv.u32 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %2)
-  ret i32 %3
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
+  ret i32 %2
 }
 
-define i32 @addv8i32i16(i16* %x) {
+define i32 @addv8i32i16(ptr %x) {
 ; CHECK-LABEL: addv8i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
 ; CHECK-NEXT:    vaddv.s16 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = sext <8 x i16> %1 to <8 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %2)
-  ret i32 %3
+  %0 = load <8 x i16>, ptr %x, align 2
+  %1 = sext <8 x i16> %0 to <8 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %1)
+  ret i32 %2
 }
 
-define i32 @addv16i32i16(i16* %x) {
+define i32 @addv16i32i16(ptr %x) {
 ; CHECK-LABEL: addv16i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r0]
@@ -457,14 +416,13 @@ define i32 @addv16i32i16(i16* %x) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <16 x i16>*
-  %1 = load <16 x i16>, <16 x i16>* %0, align 2
-  %2 = sext <16 x i16> %1 to <16 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
-  ret i32 %3
+  %0 = load <16 x i16>, ptr %x, align 2
+  %1 = sext <16 x i16> %0 to <16 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
+  ret i32 %2
 }
 
-define i32 @addv24i32i16(i16* %x) {
+define i32 @addv24i32i16(ptr %x) {
 ; CHECK-LABEL: addv24i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r0]
@@ -480,20 +438,18 @@ define i32 @addv24i32i16(i16* %x) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <16 x i16>*
-  %1 = load <16 x i16>, <16 x i16>* %0, align 2
-  %2 = sext <16 x i16> %1 to <16 x i32>
-  %arrayidx.16 = getelementptr inbounds i16, i16* %x, i32 16
-  %3 = bitcast i16* %arrayidx.16 to <8 x i16>*
-  %4 = load <8 x i16>, <8 x i16>* %3, align 2
-  %5 = sext <8 x i16> %4 to <8 x i32>
-  %6 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
-  %7 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %5)
-  %op.rdx = add nsw i32 %6, %7
+  %0 = load <16 x i16>, ptr %x, align 2
+  %1 = sext <16 x i16> %0 to <16 x i32>
+  %arrayidx.16 = getelementptr inbounds i16, ptr %x, i32 16
+  %2 = load <8 x i16>, ptr %arrayidx.16, align 2
+  %3 = sext <8 x i16> %2 to <8 x i32>
+  %4 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
+  %5 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %3)
+  %op.rdx = add nsw i32 %4, %5
   ret i32 %op.rdx
 }
 
-define i32 @addv32i32i16(i16* %x) {
+define i32 @addv32i32i16(ptr %x) {
 ; CHECK-LABEL: addv32i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r0]
@@ -515,14 +471,13 @@ define i32 @addv32i32i16(i16* %x) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <32 x i16>*
-  %1 = load <32 x i16>, <32 x i16>* %0, align 2
-  %2 = sext <32 x i16> %1 to <32 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %2)
-  ret i32 %3
+  %0 = load <32 x i16>, ptr %x, align 2
+  %1 = sext <32 x i16> %0 to <32 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %1)
+  ret i32 %2
 }
 
-define i32 @addv64i32i16(i16* %x) {
+define i32 @addv64i32i16(ptr %x) {
 ; CHECK-LABEL: addv64i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q1, [r0]
@@ -563,48 +518,44 @@ define i32 @addv64i32i16(i16* %x) {
 ; CHECK-NEXT:    add r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <32 x i16>*
-  %1 = load <32 x i16>, <32 x i16>* %0, align 2
-  %2 = sext <32 x i16> %1 to <32 x i32>
-  %arrayidx.32 = getelementptr inbounds i16, i16* %x, i32 32
-  %3 = bitcast i16* %arrayidx.32 to <16 x i16>*
-  %4 = load <16 x i16>, <16 x i16>* %3, align 2
-  %5 = sext <16 x i16> %4 to <16 x i32>
-  %arrayidx.48 = getelementptr inbounds i16, i16* %x, i32 48
-  %6 = bitcast i16* %arrayidx.48 to <8 x i16>*
-  %7 = load <8 x i16>, <8 x i16>* %6, align 2
-  %8 = sext <8 x i16> %7 to <8 x i32>
-  %arrayidx.56 = getelementptr inbounds i16, i16* %x, i32 56
-  %9 = bitcast i16* %arrayidx.56 to <4 x i16>*
-  %10 = load <4 x i16>, <4 x i16>* %9, align 2
-  %11 = sext <4 x i16> %10 to <4 x i32>
-  %arrayidx.60 = getelementptr inbounds i16, i16* %x, i32 60
-  %12 = load i16, i16* %arrayidx.60, align 2
-  %conv.60 = sext i16 %12 to i32
-  %arrayidx.61 = getelementptr inbounds i16, i16* %x, i32 61
-  %13 = load i16, i16* %arrayidx.61, align 2
-  %conv.61 = sext i16 %13 to i32
-  %arrayidx.62 = getelementptr inbounds i16, i16* %x, i32 62
-  %14 = load i16, i16* %arrayidx.62, align 2
-  %conv.62 = sext i16 %14 to i32
-  %15 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %2)
-  %16 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
-  %op.rdx = add nsw i32 %15, %16
-  %17 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %8)
-  %op.rdx8 = add nsw i32 %op.rdx, %17
-  %18 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %11)
-  %op.rdx9 = add nsw i32 %op.rdx8, %18
-  %19 = add nsw i32 %op.rdx9, %conv.60
-  %20 = add nsw i32 %19, %conv.61
-  %21 = add nsw i32 %20, %conv.62
-  %arrayidx.63 = getelementptr inbounds i16, i16* %x, i32 63
-  %22 = load i16, i16* %arrayidx.63, align 2
-  %conv.63 = sext i16 %22 to i32
-  %add.63 = add nsw i32 %21, %conv.63
+  %0 = load <32 x i16>, ptr %x, align 2
+  %1 = sext <32 x i16> %0 to <32 x i32>
+  %arrayidx.32 = getelementptr inbounds i16, ptr %x, i32 32
+  %2 = load <16 x i16>, ptr %arrayidx.32, align 2
+  %3 = sext <16 x i16> %2 to <16 x i32>
+  %arrayidx.48 = getelementptr inbounds i16, ptr %x, i32 48
+  %4 = load <8 x i16>, ptr %arrayidx.48, align 2
+  %5 = sext <8 x i16> %4 to <8 x i32>
+  %arrayidx.56 = getelementptr inbounds i16, ptr %x, i32 56
+  %6 = load <4 x i16>, ptr %arrayidx.56, align 2
+  %7 = sext <4 x i16> %6 to <4 x i32>
+  %arrayidx.60 = getelementptr inbounds i16, ptr %x, i32 60
+  %8 = load i16, ptr %arrayidx.60, align 2
+  %conv.60 = sext i16 %8 to i32
+  %arrayidx.61 = getelementptr inbounds i16, ptr %x, i32 61
+  %9 = load i16, ptr %arrayidx.61, align 2
+  %conv.61 = sext i16 %9 to i32
+  %arrayidx.62 = getelementptr inbounds i16, ptr %x, i32 62
+  %10 = load i16, ptr %arrayidx.62, align 2
+  %conv.62 = sext i16 %10 to i32
+  %11 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %1)
+  %12 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %3)
+  %op.rdx = add nsw i32 %11, %12
+  %13 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %5)
+  %op.rdx8 = add nsw i32 %op.rdx, %13
+  %14 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %7)
+  %op.rdx9 = add nsw i32 %op.rdx8, %14
+  %15 = add nsw i32 %op.rdx9, %conv.60
+  %16 = add nsw i32 %15, %conv.61
+  %17 = add nsw i32 %16, %conv.62
+  %arrayidx.63 = getelementptr inbounds i16, ptr %x, i32 63
+  %18 = load i16, ptr %arrayidx.63, align 2
+  %conv.63 = sext i16 %18 to i32
+  %add.63 = add nsw i32 %17, %conv.63
   ret i32 %add.63
 }
 
-define i32 @addv128i32i16(i16* %x) {
+define i32 @addv128i32i16(ptr %x) {
 ; CHECK-LABEL: addv128i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r0]
@@ -642,104 +593,88 @@ define i32 @addv128i32i16(i16* %x) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %0, align 2
-  %1 = sext <8 x i16> %wide.load to <8 x i32>
-  %2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %1)
-  %3 = getelementptr inbounds i16, i16* %x, i32 8
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.load.1 = load <8 x i16>, <8 x i16>* %4, align 2
-  %5 = sext <8 x i16> %wide.load.1 to <8 x i32>
-  %6 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %5)
-  %7 = add i32 %6, %2
-  %8 = getelementptr inbounds i16, i16* %x, i32 16
-  %9 = bitcast i16* %8 to <8 x i16>*
-  %wide.load.2 = load <8 x i16>, <8 x i16>* %9, align 2
-  %10 = sext <8 x i16> %wide.load.2 to <8 x i32>
-  %11 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %10)
-  %12 = add i32 %11, %7
-  %13 = getelementptr inbounds i16, i16* %x, i32 24
-  %14 = bitcast i16* %13 to <8 x i16>*
-  %wide.load.3 = load <8 x i16>, <8 x i16>* %14, align 2
-  %15 = sext <8 x i16> %wide.load.3 to <8 x i32>
+  %wide.load = load <8 x i16>, ptr %x, align 2
+  %0 = sext <8 x i16> %wide.load to <8 x i32>
+  %1 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %0)
+  %2 = getelementptr inbounds i16, ptr %x, i32 8
+  %wide.load.1 = load <8 x i16>, ptr %2, align 2
+  %3 = sext <8 x i16> %wide.load.1 to <8 x i32>
+  %4 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %3)
+  %5 = add i32 %4, %1
+  %6 = getelementptr inbounds i16, ptr %x, i32 16
+  %wide.load.2 = load <8 x i16>, ptr %6, align 2
+  %7 = sext <8 x i16> %wide.load.2 to <8 x i32>
+  %8 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %7)
+  %9 = add i32 %8, %5
+  %10 = getelementptr inbounds i16, ptr %x, i32 24
+  %wide.load.3 = load <8 x i16>, ptr %10, align 2
+  %11 = sext <8 x i16> %wide.load.3 to <8 x i32>
+  %12 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %11)
+  %13 = add i32 %12, %9
+  %14 = getelementptr inbounds i16, ptr %x, i32 32
+  %wide.load.4 = load <8 x i16>, ptr %14, align 2
+  %15 = sext <8 x i16> %wide.load.4 to <8 x i32>
   %16 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %15)
-  %17 = add i32 %16, %12
-  %18 = getelementptr inbounds i16, i16* %x, i32 32
-  %19 = bitcast i16* %18 to <8 x i16>*
-  %wide.load.4 = load <8 x i16>, <8 x i16>* %19, align 2
-  %20 = sext <8 x i16> %wide.load.4 to <8 x i32>
-  %21 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %20)
-  %22 = add i32 %21, %17
-  %23 = getelementptr inbounds i16, i16* %x, i32 40
-  %24 = bitcast i16* %23 to <8 x i16>*
-  %wide.load.5 = load <8 x i16>, <8 x i16>* %24, align 2
-  %25 = sext <8 x i16> %wide.load.5 to <8 x i32>
-  %26 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %25)
-  %27 = add i32 %26, %22
-  %28 = getelementptr inbounds i16, i16* %x, i32 48
-  %29 = bitcast i16* %28 to <8 x i16>*
-  %wide.load.6 = load <8 x i16>, <8 x i16>* %29, align 2
-  %30 = sext <8 x i16> %wide.load.6 to <8 x i32>
-  %31 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %30)
-  %32 = add i32 %31, %27
-  %33 = getelementptr inbounds i16, i16* %x, i32 56
-  %34 = bitcast i16* %33 to <8 x i16>*
-  %wide.load.7 = load <8 x i16>, <8 x i16>* %34, align 2
-  %35 = sext <8 x i16> %wide.load.7 to <8 x i32>
+  %17 = add i32 %16, %13
+  %18 = getelementptr inbounds i16, ptr %x, i32 40
+  %wide.load.5 = load <8 x i16>, ptr %18, align 2
+  %19 = sext <8 x i16> %wide.load.5 to <8 x i32>
+  %20 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %19)
+  %21 = add i32 %20, %17
+  %22 = getelementptr inbounds i16, ptr %x, i32 48
+  %wide.load.6 = load <8 x i16>, ptr %22, align 2
+  %23 = sext <8 x i16> %wide.load.6 to <8 x i32>
+  %24 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %23)
+  %25 = add i32 %24, %21
+  %26 = getelementptr inbounds i16, ptr %x, i32 56
+  %wide.load.7 = load <8 x i16>, ptr %26, align 2
+  %27 = sext <8 x i16> %wide.load.7 to <8 x i32>
+  %28 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %27)
+  %29 = add i32 %28, %25
+  %30 = getelementptr inbounds i16, ptr %x, i32 64
+  %wide.load.8 = load <8 x i16>, ptr %30, align 2
+  %31 = sext <8 x i16> %wide.load.8 to <8 x i32>
+  %32 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %31)
+  %33 = add i32 %32, %29
+  %34 = getelementptr inbounds i16, ptr %x, i32 72
+  %wide.load.9 = load <8 x i16>, ptr %34, align 2
+  %35 = sext <8 x i16> %wide.load.9 to <8 x i32>
   %36 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %35)
-  %37 = add i32 %36, %32
-  %38 = getelementptr inbounds i16, i16* %x, i32 64
-  %39 = bitcast i16* %38 to <8 x i16>*
-  %wide.load.8 = load <8 x i16>, <8 x i16>* %39, align 2
-  %40 = sext <8 x i16> %wide.load.8 to <8 x i32>
-  %41 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %40)
-  %42 = add i32 %41, %37
-  %43 = getelementptr inbounds i16, i16* %x, i32 72
-  %44 = bitcast i16* %43 to <8 x i16>*
-  %wide.load.9 = load <8 x i16>, <8 x i16>* %44, align 2
-  %45 = sext <8 x i16> %wide.load.9 to <8 x i32>
-  %46 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %45)
-  %47 = add i32 %46, %42
-  %48 = getelementptr inbounds i16, i16* %x, i32 80
-  %49 = bitcast i16* %48 to <8 x i16>*
-  %wide.load.10 = load <8 x i16>, <8 x i16>* %49, align 2
-  %50 = sext <8 x i16> %wide.load.10 to <8 x i32>
-  %51 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %50)
-  %52 = add i32 %51, %47
-  %53 = getelementptr inbounds i16, i16* %x, i32 88
-  %54 = bitcast i16* %53 to <8 x i16>*
-  %wide.load.11 = load <8 x i16>, <8 x i16>* %54, align 2
-  %55 = sext <8 x i16> %wide.load.11 to <8 x i32>
+  %37 = add i32 %36, %33
+  %38 = getelementptr inbounds i16, ptr %x, i32 80
+  %wide.load.10 = load <8 x i16>, ptr %38, align 2
+  %39 = sext <8 x i16> %wide.load.10 to <8 x i32>
+  %40 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %39)
+  %41 = add i32 %40, %37
+  %42 = getelementptr inbounds i16, ptr %x, i32 88
+  %wide.load.11 = load <8 x i16>, ptr %42, align 2
+  %43 = sext <8 x i16> %wide.load.11 to <8 x i32>
+  %44 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %43)
+  %45 = add i32 %44, %41
+  %46 = getelementptr inbounds i16, ptr %x, i32 96
+  %wide.load.12 = load <8 x i16>, ptr %46, align 2
+  %47 = sext <8 x i16> %wide.load.12 to <8 x i32>
+  %48 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %47)
+  %49 = add i32 %48, %45
+  %50 = getelementptr inbounds i16, ptr %x, i32 104
+  %wide.load.13 = load <8 x i16>, ptr %50, align 2
+  %51 = sext <8 x i16> %wide.load.13 to <8 x i32>
+  %52 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %51)
+  %53 = add i32 %52, %49
+  %54 = getelementptr inbounds i16, ptr %x, i32 112
+  %wide.load.14 = load <8 x i16>, ptr %54, align 2
+  %55 = sext <8 x i16> %wide.load.14 to <8 x i32>
   %56 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %55)
-  %57 = add i32 %56, %52
-  %58 = getelementptr inbounds i16, i16* %x, i32 96
-  %59 = bitcast i16* %58 to <8 x i16>*
-  %wide.load.12 = load <8 x i16>, <8 x i16>* %59, align 2
-  %60 = sext <8 x i16> %wide.load.12 to <8 x i32>
-  %61 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %60)
-  %62 = add i32 %61, %57
-  %63 = getelementptr inbounds i16, i16* %x, i32 104
-  %64 = bitcast i16* %63 to <8 x i16>*
-  %wide.load.13 = load <8 x i16>, <8 x i16>* %64, align 2
-  %65 = sext <8 x i16> %wide.load.13 to <8 x i32>
-  %66 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %65)
-  %67 = add i32 %66, %62
-  %68 = getelementptr inbounds i16, i16* %x, i32 112
-  %69 = bitcast i16* %68 to <8 x i16>*
-  %wide.load.14 = load <8 x i16>, <8 x i16>* %69, align 2
-  %70 = sext <8 x i16> %wide.load.14 to <8 x i32>
-  %71 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %70)
-  %72 = add i32 %71, %67
-  %73 = getelementptr inbounds i16, i16* %x, i32 120
-  %74 = bitcast i16* %73 to <8 x i16>*
-  %wide.load.15 = load <8 x i16>, <8 x i16>* %74, align 2
-  %75 = sext <8 x i16> %wide.load.15 to <8 x i32>
-  %76 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %75)
-  %77 = add i32 %76, %72
-  ret i32 %77
-}
-
-define i32 @addv2i32i8(i8* %x) {
+  %57 = add i32 %56, %53
+  %58 = getelementptr inbounds i16, ptr %x, i32 120
+  %wide.load.15 = load <8 x i16>, ptr %58, align 2
+  %59 = sext <8 x i16> %wide.load.15 to <8 x i32>
+  %60 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %59)
+  %61 = add i32 %60, %57
+  ret i32 %61
+}
+
+define i32 @addv2i32i8(ptr %x) {
 ; CHECK-LABEL: addv2i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrb r1, [r0]
@@ -747,58 +682,55 @@ define i32 @addv2i32i8(i8* %x) {
 ; CHECK-NEXT:    add r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i8, i8* %x, align 1
+  %0 = load i8, ptr %x, align 1
   %conv = zext i8 %0 to i32
-  %arrayidx.1 = getelementptr inbounds i8, i8* %x, i32 1
-  %1 = load i8, i8* %arrayidx.1, align 1
+  %arrayidx.1 = getelementptr inbounds i8, ptr %x, i32 1
+  %1 = load i8, ptr %arrayidx.1, align 1
   %conv.1 = zext i8 %1 to i32
   %add.1 = add nuw nsw i32 %conv, %conv.1
   ret i32 %add.1
 }
 
-define i32 @addv4i32i8(i8* %x) {
+define i32 @addv4i32i8(ptr %x) {
 ; CHECK-LABEL: addv4i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0]
 ; CHECK-NEXT:    vaddv.u32 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %2)
-  ret i32 %3
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
+  ret i32 %2
 }
 
-define i32 @addv8i32i8(i8* %x) {
+define i32 @addv8i32i8(ptr %x) {
 ; CHECK-LABEL: addv8i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0]
 ; CHECK-NEXT:    vaddv.u16 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %2)
-  ret i32 %3
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = zext <8 x i8> %0 to <8 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %1)
+  ret i32 %2
 }
 
-define i32 @addv16i32i8(i8* %x) {
+define i32 @addv16i32i8(ptr %x) {
 ; CHECK-LABEL: addv16i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
 ; CHECK-NEXT:    vaddv.u8 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = zext <16 x i8> %1 to <16 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
-  ret i32 %3
+  %0 = load <16 x i8>, ptr %x, align 1
+  %1 = zext <16 x i8> %0 to <16 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
+  ret i32 %2
 }
 
-define i32 @addv24i32i8(i8* %x) {
+define i32 @addv24i32i8(ptr %x) {
 ; CHECK-LABEL: addv24i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r0]
@@ -807,20 +739,18 @@ define i32 @addv24i32i8(i8* %x) {
 ; CHECK-NEXT:    vaddva.u16 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = zext <16 x i8> %1 to <16 x i32>
-  %arrayidx.16 = getelementptr inbounds i8, i8* %x, i32 16
-  %3 = bitcast i8* %arrayidx.16 to <8 x i8>*
-  %4 = load <8 x i8>, <8 x i8>* %3, align 1
-  %5 = zext <8 x i8> %4 to <8 x i32>
-  %6 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
-  %7 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %5)
-  %op.rdx = add nuw nsw i32 %6, %7
+  %0 = load <16 x i8>, ptr %x, align 1
+  %1 = zext <16 x i8> %0 to <16 x i32>
+  %arrayidx.16 = getelementptr inbounds i8, ptr %x, i32 16
+  %2 = load <8 x i8>, ptr %arrayidx.16, align 1
+  %3 = zext <8 x i8> %2 to <8 x i32>
+  %4 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
+  %5 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %3)
+  %op.rdx = add nuw nsw i32 %4, %5
   ret i32 %op.rdx
 }
 
-define i32 @addv32i32i8(i8* %x) {
+define i32 @addv32i32i8(ptr %x) {
 ; CHECK-LABEL: addv32i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r0]
@@ -842,14 +772,13 @@ define i32 @addv32i32i8(i8* %x) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <32 x i8>*
-  %1 = load <32 x i8>, <32 x i8>* %0, align 1
-  %2 = zext <32 x i8> %1 to <32 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %2)
-  ret i32 %3
+  %0 = load <32 x i8>, ptr %x, align 1
+  %1 = zext <32 x i8> %0 to <32 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %1)
+  ret i32 %2
 }
 
-define i32 @addv64i32i8(i8* %x) {
+define i32 @addv64i32i8(ptr %x) {
 ; CHECK-LABEL: addv64i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q1, [r0]
@@ -884,48 +813,44 @@ define i32 @addv64i32i8(i8* %x) {
 ; CHECK-NEXT:    add r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <32 x i8>*
-  %1 = load <32 x i8>, <32 x i8>* %0, align 1
-  %2 = zext <32 x i8> %1 to <32 x i32>
-  %arrayidx.32 = getelementptr inbounds i8, i8* %x, i32 32
-  %3 = bitcast i8* %arrayidx.32 to <16 x i8>*
-  %4 = load <16 x i8>, <16 x i8>* %3, align 1
-  %5 = zext <16 x i8> %4 to <16 x i32>
-  %arrayidx.48 = getelementptr inbounds i8, i8* %x, i32 48
-  %6 = bitcast i8* %arrayidx.48 to <8 x i8>*
-  %7 = load <8 x i8>, <8 x i8>* %6, align 1
-  %8 = zext <8 x i8> %7 to <8 x i32>
-  %arrayidx.56 = getelementptr inbounds i8, i8* %x, i32 56
-  %9 = bitcast i8* %arrayidx.56 to <4 x i8>*
-  %10 = load <4 x i8>, <4 x i8>* %9, align 1
-  %11 = zext <4 x i8> %10 to <4 x i32>
-  %arrayidx.60 = getelementptr inbounds i8, i8* %x, i32 60
-  %12 = load i8, i8* %arrayidx.60, align 1
-  %conv.60 = zext i8 %12 to i32
-  %arrayidx.61 = getelementptr inbounds i8, i8* %x, i32 61
-  %13 = load i8, i8* %arrayidx.61, align 1
-  %conv.61 = zext i8 %13 to i32
-  %arrayidx.62 = getelementptr inbounds i8, i8* %x, i32 62
-  %14 = load i8, i8* %arrayidx.62, align 1
-  %conv.62 = zext i8 %14 to i32
-  %15 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %2)
-  %16 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
-  %op.rdx = add nuw nsw i32 %15, %16
-  %17 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %8)
-  %op.rdx8 = add nuw nsw i32 %op.rdx, %17
-  %18 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %11)
-  %op.rdx9 = add nuw nsw i32 %op.rdx8, %18
-  %19 = add nuw nsw i32 %op.rdx9, %conv.60
-  %20 = add nuw nsw i32 %19, %conv.61
-  %21 = add nuw nsw i32 %20, %conv.62
-  %arrayidx.63 = getelementptr inbounds i8, i8* %x, i32 63
-  %22 = load i8, i8* %arrayidx.63, align 1
-  %conv.63 = zext i8 %22 to i32
-  %add.63 = add nuw nsw i32 %21, %conv.63
+  %0 = load <32 x i8>, ptr %x, align 1
+  %1 = zext <32 x i8> %0 to <32 x i32>
+  %arrayidx.32 = getelementptr inbounds i8, ptr %x, i32 32
+  %2 = load <16 x i8>, ptr %arrayidx.32, align 1
+  %3 = zext <16 x i8> %2 to <16 x i32>
+  %arrayidx.48 = getelementptr inbounds i8, ptr %x, i32 48
+  %4 = load <8 x i8>, ptr %arrayidx.48, align 1
+  %5 = zext <8 x i8> %4 to <8 x i32>
+  %arrayidx.56 = getelementptr inbounds i8, ptr %x, i32 56
+  %6 = load <4 x i8>, ptr %arrayidx.56, align 1
+  %7 = zext <4 x i8> %6 to <4 x i32>
+  %arrayidx.60 = getelementptr inbounds i8, ptr %x, i32 60
+  %8 = load i8, ptr %arrayidx.60, align 1
+  %conv.60 = zext i8 %8 to i32
+  %arrayidx.61 = getelementptr inbounds i8, ptr %x, i32 61
+  %9 = load i8, ptr %arrayidx.61, align 1
+  %conv.61 = zext i8 %9 to i32
+  %arrayidx.62 = getelementptr inbounds i8, ptr %x, i32 62
+  %10 = load i8, ptr %arrayidx.62, align 1
+  %conv.62 = zext i8 %10 to i32
+  %11 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %1)
+  %12 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %3)
+  %op.rdx = add nuw nsw i32 %11, %12
+  %13 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %5)
+  %op.rdx8 = add nuw nsw i32 %op.rdx, %13
+  %14 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %7)
+  %op.rdx9 = add nuw nsw i32 %op.rdx8, %14
+  %15 = add nuw nsw i32 %op.rdx9, %conv.60
+  %16 = add nuw nsw i32 %15, %conv.61
+  %17 = add nuw nsw i32 %16, %conv.62
+  %arrayidx.63 = getelementptr inbounds i8, ptr %x, i32 63
+  %18 = load i8, ptr %arrayidx.63, align 1
+  %conv.63 = zext i8 %18 to i32
+  %add.63 = add nuw nsw i32 %17, %conv.63
   ret i32 %add.63
 }
 
-define i32 @addv128i32i8(i8* %x) {
+define i32 @addv128i32i8(ptr %x) {
 ; CHECK-LABEL: addv128i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r0]
@@ -947,56 +872,48 @@ define i32 @addv128i32i8(i8* %x) {
 ; CHECK-NEXT:    vaddva.u8 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %0, align 1
-  %1 = zext <16 x i8> %wide.load to <16 x i32>
-  %2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
-  %3 = getelementptr inbounds i8, i8* %x, i32 16
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load.1 = load <16 x i8>, <16 x i8>* %4, align 1
-  %5 = zext <16 x i8> %wide.load.1 to <16 x i32>
-  %6 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
-  %7 = add i32 %6, %2
-  %8 = getelementptr inbounds i8, i8* %x, i32 32
-  %9 = bitcast i8* %8 to <16 x i8>*
-  %wide.load.2 = load <16 x i8>, <16 x i8>* %9, align 1
-  %10 = zext <16 x i8> %wide.load.2 to <16 x i32>
-  %11 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %10)
-  %12 = add i32 %11, %7
-  %13 = getelementptr inbounds i8, i8* %x, i32 48
-  %14 = bitcast i8* %13 to <16 x i8>*
-  %wide.load.3 = load <16 x i8>, <16 x i8>* %14, align 1
-  %15 = zext <16 x i8> %wide.load.3 to <16 x i32>
+  %wide.load = load <16 x i8>, ptr %x, align 1
+  %0 = zext <16 x i8> %wide.load to <16 x i32>
+  %1 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %0)
+  %2 = getelementptr inbounds i8, ptr %x, i32 16
+  %wide.load.1 = load <16 x i8>, ptr %2, align 1
+  %3 = zext <16 x i8> %wide.load.1 to <16 x i32>
+  %4 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %3)
+  %5 = add i32 %4, %1
+  %6 = getelementptr inbounds i8, ptr %x, i32 32
+  %wide.load.2 = load <16 x i8>, ptr %6, align 1
+  %7 = zext <16 x i8> %wide.load.2 to <16 x i32>
+  %8 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %7)
+  %9 = add i32 %8, %5
+  %10 = getelementptr inbounds i8, ptr %x, i32 48
+  %wide.load.3 = load <16 x i8>, ptr %10, align 1
+  %11 = zext <16 x i8> %wide.load.3 to <16 x i32>
+  %12 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %11)
+  %13 = add i32 %12, %9
+  %14 = getelementptr inbounds i8, ptr %x, i32 64
+  %wide.load.4 = load <16 x i8>, ptr %14, align 1
+  %15 = zext <16 x i8> %wide.load.4 to <16 x i32>
   %16 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %15)
-  %17 = add i32 %16, %12
-  %18 = getelementptr inbounds i8, i8* %x, i32 64
-  %19 = bitcast i8* %18 to <16 x i8>*
-  %wide.load.4 = load <16 x i8>, <16 x i8>* %19, align 1
-  %20 = zext <16 x i8> %wide.load.4 to <16 x i32>
-  %21 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %20)
-  %22 = add i32 %21, %17
-  %23 = getelementptr inbounds i8, i8* %x, i32 80
-  %24 = bitcast i8* %23 to <16 x i8>*
-  %wide.load.5 = load <16 x i8>, <16 x i8>* %24, align 1
-  %25 = zext <16 x i8> %wide.load.5 to <16 x i32>
-  %26 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %25)
-  %27 = add i32 %26, %22
-  %28 = getelementptr inbounds i8, i8* %x, i32 96
-  %29 = bitcast i8* %28 to <16 x i8>*
-  %wide.load.6 = load <16 x i8>, <16 x i8>* %29, align 1
-  %30 = zext <16 x i8> %wide.load.6 to <16 x i32>
-  %31 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %30)
-  %32 = add i32 %31, %27
-  %33 = getelementptr inbounds i8, i8* %x, i32 112
-  %34 = bitcast i8* %33 to <16 x i8>*
-  %wide.load.7 = load <16 x i8>, <16 x i8>* %34, align 1
-  %35 = zext <16 x i8> %wide.load.7 to <16 x i32>
-  %36 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %35)
-  %37 = add i32 %36, %32
-  ret i32 %37
-}
-
-define signext i16 @addv2i16i16(i16* %x) {
+  %17 = add i32 %16, %13
+  %18 = getelementptr inbounds i8, ptr %x, i32 80
+  %wide.load.5 = load <16 x i8>, ptr %18, align 1
+  %19 = zext <16 x i8> %wide.load.5 to <16 x i32>
+  %20 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %19)
+  %21 = add i32 %20, %17
+  %22 = getelementptr inbounds i8, ptr %x, i32 96
+  %wide.load.6 = load <16 x i8>, ptr %22, align 1
+  %23 = zext <16 x i8> %wide.load.6 to <16 x i32>
+  %24 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %23)
+  %25 = add i32 %24, %21
+  %26 = getelementptr inbounds i8, ptr %x, i32 112
+  %wide.load.7 = load <16 x i8>, ptr %26, align 1
+  %27 = zext <16 x i8> %wide.load.7 to <16 x i32>
+  %28 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %27)
+  %29 = add i32 %28, %25
+  ret i32 %29
+}
+
+define signext i16 @addv2i16i16(ptr %x) {
 ; CHECK-LABEL: addv2i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrh r1, [r0]
@@ -1005,14 +922,14 @@ define signext i16 @addv2i16i16(i16* %x) {
 ; CHECK-NEXT:    sxth r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i16, i16* %x, align 2
-  %arrayidx.1 = getelementptr inbounds i16, i16* %x, i32 1
-  %1 = load i16, i16* %arrayidx.1, align 2
+  %0 = load i16, ptr %x, align 2
+  %arrayidx.1 = getelementptr inbounds i16, ptr %x, i32 1
+  %1 = load i16, ptr %arrayidx.1, align 2
   %add.1 = add i16 %1, %0
   ret i16 %add.1
 }
 
-define signext i16 @addv4i16i16(i16* %x) {
+define signext i16 @addv4i16i16(ptr %x) {
 ; CHECK-LABEL: addv4i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0]
@@ -1020,13 +937,12 @@ define signext i16 @addv4i16i16(i16* %x) {
 ; CHECK-NEXT:    sxth r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %1)
-  ret i16 %2
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %0)
+  ret i16 %1
 }
 
-define signext i16 @addv8i16i16(i16* %x) {
+define signext i16 @addv8i16i16(ptr %x) {
 ; CHECK-LABEL: addv8i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -1034,13 +950,12 @@ define signext i16 @addv8i16i16(i16* %x) {
 ; CHECK-NEXT:    sxth r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %1)
-  ret i16 %2
+  %0 = load <8 x i16>, ptr %x, align 2
+  %1 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %0)
+  ret i16 %1
 }
 
-define signext i16 @addv16i16i16(i16* %x) {
+define signext i16 @addv16i16i16(ptr %x) {
 ; CHECK-LABEL: addv16i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r0]
@@ -1050,13 +965,12 @@ define signext i16 @addv16i16i16(i16* %x) {
 ; CHECK-NEXT:    sxth r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <16 x i16>*
-  %1 = load <16 x i16>, <16 x i16>* %0, align 2
-  %2 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %1)
-  ret i16 %2
+  %0 = load <16 x i16>, ptr %x, align 2
+  %1 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %0)
+  ret i16 %1
 }
 
-define signext i16 @addv24i16i16(i16* %x) {
+define signext i16 @addv24i16i16(ptr %x) {
 ; CHECK-LABEL: addv24i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r0]
@@ -1068,18 +982,16 @@ define signext i16 @addv24i16i16(i16* %x) {
 ; CHECK-NEXT:    sxth r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %arrayidx.8 = getelementptr inbounds i16, i16* %x, i32 8
-  %2 = bitcast i16* %arrayidx.8 to <16 x i16>*
-  %3 = load <16 x i16>, <16 x i16>* %2, align 2
-  %4 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %3)
-  %5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %1)
-  %op.rdx = add i16 %4, %5
+  %0 = load <8 x i16>, ptr %x, align 2
+  %arrayidx.8 = getelementptr inbounds i16, ptr %x, i32 8
+  %1 = load <16 x i16>, ptr %arrayidx.8, align 2
+  %2 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %1)
+  %3 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %0)
+  %op.rdx = add i16 %2, %3
   ret i16 %op.rdx
 }
 
-define signext i16 @addv32i16i16(i16* %x) {
+define signext i16 @addv32i16i16(ptr %x) {
 ; CHECK-LABEL: addv32i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r0]
@@ -1093,13 +1005,12 @@ define signext i16 @addv32i16i16(i16* %x) {
 ; CHECK-NEXT:    sxth r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <32 x i16>*
-  %1 = load <32 x i16>, <32 x i16>* %0, align 2
-  %2 = call i16 @llvm.vector.reduce.add.v32i16(<32 x i16> %1)
-  ret i16 %2
+  %0 = load <32 x i16>, ptr %x, align 2
+  %1 = call i16 @llvm.vector.reduce.add.v32i16(<32 x i16> %0)
+  ret i16 %1
 }
 
-define signext i16 @addv64i16i16(i16* %x) {
+define signext i16 @addv64i16i16(ptr %x) {
 ; CHECK-LABEL: addv64i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r0]
@@ -1121,13 +1032,12 @@ define signext i16 @addv64i16i16(i16* %x) {
 ; CHECK-NEXT:    sxth r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <64 x i16>*
-  %1 = load <64 x i16>, <64 x i16>* %0, align 2
-  %2 = call i16 @llvm.vector.reduce.add.v64i16(<64 x i16> %1)
-  ret i16 %2
+  %0 = load <64 x i16>, ptr %x, align 2
+  %1 = call i16 @llvm.vector.reduce.add.v64i16(<64 x i16> %0)
+  ret i16 %1
 }
 
-define signext i16 @addv128i16i16(i16* %x) {
+define signext i16 @addv128i16i16(ptr %x) {
 ; CHECK-LABEL: addv128i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q1, [r0]
@@ -1165,88 +1075,72 @@ define signext i16 @addv128i16i16(i16* %x) {
 ; CHECK-NEXT:    sxth r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %0, align 2
-  %1 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load)
-  %2 = getelementptr inbounds i16, i16* %x, i32 8
-  %3 = bitcast i16* %2 to <8 x i16>*
-  %wide.load.1 = load <8 x i16>, <8 x i16>* %3, align 2
-  %4 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.1)
-  %5 = add i16 %4, %1
-  %6 = getelementptr inbounds i16, i16* %x, i32 16
-  %7 = bitcast i16* %6 to <8 x i16>*
-  %wide.load.2 = load <8 x i16>, <8 x i16>* %7, align 2
-  %8 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.2)
-  %9 = add i16 %8, %5
-  %10 = getelementptr inbounds i16, i16* %x, i32 24
-  %11 = bitcast i16* %10 to <8 x i16>*
-  %wide.load.3 = load <8 x i16>, <8 x i16>* %11, align 2
-  %12 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.3)
-  %13 = add i16 %12, %9
-  %14 = getelementptr inbounds i16, i16* %x, i32 32
-  %15 = bitcast i16* %14 to <8 x i16>*
-  %wide.load.4 = load <8 x i16>, <8 x i16>* %15, align 2
-  %16 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.4)
-  %17 = add i16 %16, %13
-  %18 = getelementptr inbounds i16, i16* %x, i32 40
-  %19 = bitcast i16* %18 to <8 x i16>*
-  %wide.load.5 = load <8 x i16>, <8 x i16>* %19, align 2
-  %20 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.5)
-  %21 = add i16 %20, %17
-  %22 = getelementptr inbounds i16, i16* %x, i32 48
-  %23 = bitcast i16* %22 to <8 x i16>*
-  %wide.load.6 = load <8 x i16>, <8 x i16>* %23, align 2
-  %24 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.6)
-  %25 = add i16 %24, %21
-  %26 = getelementptr inbounds i16, i16* %x, i32 56
-  %27 = bitcast i16* %26 to <8 x i16>*
-  %wide.load.7 = load <8 x i16>, <8 x i16>* %27, align 2
-  %28 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.7)
-  %29 = add i16 %28, %25
-  %30 = getelementptr inbounds i16, i16* %x, i32 64
-  %31 = bitcast i16* %30 to <8 x i16>*
-  %wide.load.8 = load <8 x i16>, <8 x i16>* %31, align 2
-  %32 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.8)
-  %33 = add i16 %32, %29
-  %34 = getelementptr inbounds i16, i16* %x, i32 72
-  %35 = bitcast i16* %34 to <8 x i16>*
-  %wide.load.9 = load <8 x i16>, <8 x i16>* %35, align 2
-  %36 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.9)
-  %37 = add i16 %36, %33
-  %38 = getelementptr inbounds i16, i16* %x, i32 80
-  %39 = bitcast i16* %38 to <8 x i16>*
-  %wide.load.10 = load <8 x i16>, <8 x i16>* %39, align 2
-  %40 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.10)
-  %41 = add i16 %40, %37
-  %42 = getelementptr inbounds i16, i16* %x, i32 88
-  %43 = bitcast i16* %42 to <8 x i16>*
-  %wide.load.11 = load <8 x i16>, <8 x i16>* %43, align 2
-  %44 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.11)
-  %45 = add i16 %44, %41
-  %46 = getelementptr inbounds i16, i16* %x, i32 96
-  %47 = bitcast i16* %46 to <8 x i16>*
-  %wide.load.12 = load <8 x i16>, <8 x i16>* %47, align 2
-  %48 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.12)
-  %49 = add i16 %48, %45
-  %50 = getelementptr inbounds i16, i16* %x, i32 104
-  %51 = bitcast i16* %50 to <8 x i16>*
-  %wide.load.13 = load <8 x i16>, <8 x i16>* %51, align 2
-  %52 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.13)
-  %53 = add i16 %52, %49
-  %54 = getelementptr inbounds i16, i16* %x, i32 112
-  %55 = bitcast i16* %54 to <8 x i16>*
-  %wide.load.14 = load <8 x i16>, <8 x i16>* %55, align 2
-  %56 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.14)
-  %57 = add i16 %56, %53
-  %58 = getelementptr inbounds i16, i16* %x, i32 120
-  %59 = bitcast i16* %58 to <8 x i16>*
-  %wide.load.15 = load <8 x i16>, <8 x i16>* %59, align 2
-  %60 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.15)
-  %61 = add i16 %60, %57
-  ret i16 %61
-}
-
-define zeroext i8 @addv2i8i8(i8* %x) {
+  %wide.load = load <8 x i16>, ptr %x, align 2
+  %0 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load)
+  %1 = getelementptr inbounds i16, ptr %x, i32 8
+  %wide.load.1 = load <8 x i16>, ptr %1, align 2
+  %2 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.1)
+  %3 = add i16 %2, %0
+  %4 = getelementptr inbounds i16, ptr %x, i32 16
+  %wide.load.2 = load <8 x i16>, ptr %4, align 2
+  %5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.2)
+  %6 = add i16 %5, %3
+  %7 = getelementptr inbounds i16, ptr %x, i32 24
+  %wide.load.3 = load <8 x i16>, ptr %7, align 2
+  %8 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.3)
+  %9 = add i16 %8, %6
+  %10 = getelementptr inbounds i16, ptr %x, i32 32
+  %wide.load.4 = load <8 x i16>, ptr %10, align 2
+  %11 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.4)
+  %12 = add i16 %11, %9
+  %13 = getelementptr inbounds i16, ptr %x, i32 40
+  %wide.load.5 = load <8 x i16>, ptr %13, align 2
+  %14 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.5)
+  %15 = add i16 %14, %12
+  %16 = getelementptr inbounds i16, ptr %x, i32 48
+  %wide.load.6 = load <8 x i16>, ptr %16, align 2
+  %17 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.6)
+  %18 = add i16 %17, %15
+  %19 = getelementptr inbounds i16, ptr %x, i32 56
+  %wide.load.7 = load <8 x i16>, ptr %19, align 2
+  %20 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.7)
+  %21 = add i16 %20, %18
+  %22 = getelementptr inbounds i16, ptr %x, i32 64
+  %wide.load.8 = load <8 x i16>, ptr %22, align 2
+  %23 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.8)
+  %24 = add i16 %23, %21
+  %25 = getelementptr inbounds i16, ptr %x, i32 72
+  %wide.load.9 = load <8 x i16>, ptr %25, align 2
+  %26 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.9)
+  %27 = add i16 %26, %24
+  %28 = getelementptr inbounds i16, ptr %x, i32 80
+  %wide.load.10 = load <8 x i16>, ptr %28, align 2
+  %29 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.10)
+  %30 = add i16 %29, %27
+  %31 = getelementptr inbounds i16, ptr %x, i32 88
+  %wide.load.11 = load <8 x i16>, ptr %31, align 2
+  %32 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.11)
+  %33 = add i16 %32, %30
+  %34 = getelementptr inbounds i16, ptr %x, i32 96
+  %wide.load.12 = load <8 x i16>, ptr %34, align 2
+  %35 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.12)
+  %36 = add i16 %35, %33
+  %37 = getelementptr inbounds i16, ptr %x, i32 104
+  %wide.load.13 = load <8 x i16>, ptr %37, align 2
+  %38 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.13)
+  %39 = add i16 %38, %36
+  %40 = getelementptr inbounds i16, ptr %x, i32 112
+  %wide.load.14 = load <8 x i16>, ptr %40, align 2
+  %41 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.14)
+  %42 = add i16 %41, %39
+  %43 = getelementptr inbounds i16, ptr %x, i32 120
+  %wide.load.15 = load <8 x i16>, ptr %43, align 2
+  %44 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %wide.load.15)
+  %45 = add i16 %44, %42
+  ret i16 %45
+}
+
+define zeroext i8 @addv2i8i8(ptr %x) {
 ; CHECK-LABEL: addv2i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrb r1, [r0]
@@ -1255,14 +1149,14 @@ define zeroext i8 @addv2i8i8(i8* %x) {
 ; CHECK-NEXT:    uxtb r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i8, i8* %x, align 1
-  %arrayidx.1 = getelementptr inbounds i8, i8* %x, i32 1
-  %1 = load i8, i8* %arrayidx.1, align 1
+  %0 = load i8, ptr %x, align 1
+  %arrayidx.1 = getelementptr inbounds i8, ptr %x, i32 1
+  %1 = load i8, ptr %arrayidx.1, align 1
   %add.1 = add i8 %1, %0
   ret i8 %add.1
 }
 
-define zeroext i8 @addv4i8i8(i8* %x) {
+define zeroext i8 @addv4i8i8(ptr %x) {
 ; CHECK-LABEL: addv4i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0]
@@ -1270,13 +1164,12 @@ define zeroext i8 @addv4i8i8(i8* %x) {
 ; CHECK-NEXT:    uxtb r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> %1)
-  ret i8 %2
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> %0)
+  ret i8 %1
 }
 
-define zeroext i8 @addv8i8i8(i8* %x) {
+define zeroext i8 @addv8i8i8(ptr %x) {
 ; CHECK-LABEL: addv8i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0]
@@ -1284,13 +1177,12 @@ define zeroext i8 @addv8i8i8(i8* %x) {
 ; CHECK-NEXT:    uxtb r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %1)
-  ret i8 %2
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %0)
+  ret i8 %1
 }
 
-define zeroext i8 @addv16i8i8(i8* %x) {
+define zeroext i8 @addv16i8i8(ptr %x) {
 ; CHECK-LABEL: addv16i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -1298,13 +1190,12 @@ define zeroext i8 @addv16i8i8(i8* %x) {
 ; CHECK-NEXT:    uxtb r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %1)
-  ret i8 %2
+  %0 = load <16 x i8>, ptr %x, align 1
+  %1 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %0)
+  ret i8 %1
 }
 
-define zeroext i8 @addv24i8i8(i8* %x) {
+define zeroext i8 @addv24i8i8(ptr %x) {
 ; CHECK-LABEL: addv24i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q1, [r0]
@@ -1314,18 +1205,16 @@ define zeroext i8 @addv24i8i8(i8* %x) {
 ; CHECK-NEXT:    uxtb r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %arrayidx.8 = getelementptr inbounds i8, i8* %x, i32 8
-  %2 = bitcast i8* %arrayidx.8 to <16 x i8>*
-  %3 = load <16 x i8>, <16 x i8>* %2, align 1
-  %4 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %3)
-  %5 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %1)
-  %op.rdx = add i8 %4, %5
+  %0 = load <8 x i8>, ptr %x, align 1
+  %arrayidx.8 = getelementptr inbounds i8, ptr %x, i32 8
+  %1 = load <16 x i8>, ptr %arrayidx.8, align 1
+  %2 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %1)
+  %3 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %0)
+  %op.rdx = add i8 %2, %3
   ret i8 %op.rdx
 }
 
-define zeroext i8 @addv32i8i8(i8* %x) {
+define zeroext i8 @addv32i8i8(ptr %x) {
 ; CHECK-LABEL: addv32i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r0]
@@ -1335,13 +1224,12 @@ define zeroext i8 @addv32i8i8(i8* %x) {
 ; CHECK-NEXT:    uxtb r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <32 x i8>*
-  %1 = load <32 x i8>, <32 x i8>* %0, align 1
-  %2 = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %1)
-  ret i8 %2
+  %0 = load <32 x i8>, ptr %x, align 1
+  %1 = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %0)
+  ret i8 %1
 }
 
-define zeroext i8 @addv64i8i8(i8* %x) {
+define zeroext i8 @addv64i8i8(ptr %x) {
 ; CHECK-LABEL: addv64i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r0]
@@ -1355,13 +1243,12 @@ define zeroext i8 @addv64i8i8(i8* %x) {
 ; CHECK-NEXT:    uxtb r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <64 x i8>*
-  %1 = load <64 x i8>, <64 x i8>* %0, align 1
-  %2 = call i8 @llvm.vector.reduce.add.v64i8(<64 x i8> %1)
-  ret i8 %2
+  %0 = load <64 x i8>, ptr %x, align 1
+  %1 = call i8 @llvm.vector.reduce.add.v64i8(<64 x i8> %0)
+  ret i8 %1
 }
 
-define zeroext i8 @addv128i8i8(i8* %x) {
+define zeroext i8 @addv128i8i8(ptr %x) {
 ; CHECK-LABEL: addv128i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q1, [r0]
@@ -1383,50 +1270,42 @@ define zeroext i8 @addv128i8i8(i8* %x) {
 ; CHECK-NEXT:    uxtb r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %0, align 1
-  %1 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load)
-  %2 = getelementptr inbounds i8, i8* %x, i32 16
-  %3 = bitcast i8* %2 to <16 x i8>*
-  %wide.load.1 = load <16 x i8>, <16 x i8>* %3, align 1
-  %4 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.1)
-  %5 = add i8 %4, %1
-  %6 = getelementptr inbounds i8, i8* %x, i32 32
-  %7 = bitcast i8* %6 to <16 x i8>*
-  %wide.load.2 = load <16 x i8>, <16 x i8>* %7, align 1
-  %8 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.2)
-  %9 = add i8 %8, %5
-  %10 = getelementptr inbounds i8, i8* %x, i32 48
-  %11 = bitcast i8* %10 to <16 x i8>*
-  %wide.load.3 = load <16 x i8>, <16 x i8>* %11, align 1
-  %12 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.3)
-  %13 = add i8 %12, %9
-  %14 = getelementptr inbounds i8, i8* %x, i32 64
-  %15 = bitcast i8* %14 to <16 x i8>*
-  %wide.load.4 = load <16 x i8>, <16 x i8>* %15, align 1
-  %16 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.4)
-  %17 = add i8 %16, %13
-  %18 = getelementptr inbounds i8, i8* %x, i32 80
-  %19 = bitcast i8* %18 to <16 x i8>*
-  %wide.load.5 = load <16 x i8>, <16 x i8>* %19, align 1
-  %20 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.5)
-  %21 = add i8 %20, %17
-  %22 = getelementptr inbounds i8, i8* %x, i32 96
-  %23 = bitcast i8* %22 to <16 x i8>*
-  %wide.load.6 = load <16 x i8>, <16 x i8>* %23, align 1
-  %24 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.6)
-  %25 = add i8 %24, %21
-  %26 = getelementptr inbounds i8, i8* %x, i32 112
-  %27 = bitcast i8* %26 to <16 x i8>*
-  %wide.load.7 = load <16 x i8>, <16 x i8>* %27, align 1
-  %28 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.7)
-  %29 = add i8 %28, %25
-  ret i8 %29
-}
-
-
-
-define i32 @mlav2i32i32(i32* %x, i32* %y) {
+  %wide.load = load <16 x i8>, ptr %x, align 1
+  %0 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load)
+  %1 = getelementptr inbounds i8, ptr %x, i32 16
+  %wide.load.1 = load <16 x i8>, ptr %1, align 1
+  %2 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.1)
+  %3 = add i8 %2, %0
+  %4 = getelementptr inbounds i8, ptr %x, i32 32
+  %wide.load.2 = load <16 x i8>, ptr %4, align 1
+  %5 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.2)
+  %6 = add i8 %5, %3
+  %7 = getelementptr inbounds i8, ptr %x, i32 48
+  %wide.load.3 = load <16 x i8>, ptr %7, align 1
+  %8 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.3)
+  %9 = add i8 %8, %6
+  %10 = getelementptr inbounds i8, ptr %x, i32 64
+  %wide.load.4 = load <16 x i8>, ptr %10, align 1
+  %11 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.4)
+  %12 = add i8 %11, %9
+  %13 = getelementptr inbounds i8, ptr %x, i32 80
+  %wide.load.5 = load <16 x i8>, ptr %13, align 1
+  %14 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.5)
+  %15 = add i8 %14, %12
+  %16 = getelementptr inbounds i8, ptr %x, i32 96
+  %wide.load.6 = load <16 x i8>, ptr %16, align 1
+  %17 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.6)
+  %18 = add i8 %17, %15
+  %19 = getelementptr inbounds i8, ptr %x, i32 112
+  %wide.load.7 = load <16 x i8>, ptr %19, align 1
+  %20 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %wide.load.7)
+  %21 = add i8 %20, %18
+  ret i8 %21
+}
+
+
+
+define i32 @mlav2i32i32(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav2i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrd r2, r0, [r0]
@@ -1435,19 +1314,19 @@ define i32 @mlav2i32i32(i32* %x, i32* %y) {
 ; CHECK-NEXT:    mla r0, r1, r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %x, align 4
-  %1 = load i32, i32* %y, align 4
+  %0 = load i32, ptr %x, align 4
+  %1 = load i32, ptr %y, align 4
   %mul = mul nsw i32 %1, %0
-  %arrayidx.1 = getelementptr inbounds i32, i32* %x, i32 1
-  %2 = load i32, i32* %arrayidx.1, align 4
-  %arrayidx1.1 = getelementptr inbounds i32, i32* %y, i32 1
-  %3 = load i32, i32* %arrayidx1.1, align 4
+  %arrayidx.1 = getelementptr inbounds i32, ptr %x, i32 1
+  %2 = load i32, ptr %arrayidx.1, align 4
+  %arrayidx1.1 = getelementptr inbounds i32, ptr %y, i32 1
+  %3 = load i32, ptr %arrayidx1.1, align 4
   %mul.1 = mul nsw i32 %3, %2
   %add.1 = add nsw i32 %mul.1, %mul
   ret i32 %add.1
 }
 
-define i32 @mlav4i32i32(i32* %x, i32* %y) {
+define i32 @mlav4i32i32(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav4i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -1455,16 +1334,14 @@ define i32 @mlav4i32i32(i32* %x, i32* %y) {
 ; CHECK-NEXT:    vmlav.u32 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <4 x i32>*
-  %1 = load <4 x i32>, <4 x i32>* %0, align 4
-  %2 = bitcast i32* %y to <4 x i32>*
-  %3 = load <4 x i32>, <4 x i32>* %2, align 4
-  %4 = mul nsw <4 x i32> %3, %1
-  %5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %4)
-  ret i32 %5
+  %0 = load <4 x i32>, ptr %x, align 4
+  %1 = load <4 x i32>, ptr %y, align 4
+  %2 = mul nsw <4 x i32> %1, %0
+  %3 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %2)
+  ret i32 %3
 }
 
-define i32 @mlav8i32i32(i32* %x, i32* %y) {
+define i32 @mlav8i32i32(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav8i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -1476,16 +1353,14 @@ define i32 @mlav8i32i32(i32* %x, i32* %y) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <8 x i32>*
-  %1 = load <8 x i32>, <8 x i32>* %0, align 4
-  %2 = bitcast i32* %y to <8 x i32>*
-  %3 = load <8 x i32>, <8 x i32>* %2, align 4
-  %4 = mul nsw <8 x i32> %3, %1
-  %5 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %4)
-  ret i32 %5
+  %0 = load <8 x i32>, ptr %x, align 4
+  %1 = load <8 x i32>, ptr %y, align 4
+  %2 = mul nsw <8 x i32> %1, %0
+  %3 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %2)
+  ret i32 %3
 }
 
-define i32 @mlav16i32i32(i32* %x, i32* %y) {
+define i32 @mlav16i32i32(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav16i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -1503,16 +1378,14 @@ define i32 @mlav16i32i32(i32* %x, i32* %y) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <16 x i32>*
-  %1 = load <16 x i32>, <16 x i32>* %0, align 4
-  %2 = bitcast i32* %y to <16 x i32>*
-  %3 = load <16 x i32>, <16 x i32>* %2, align 4
-  %4 = mul nsw <16 x i32> %3, %1
-  %5 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %4)
-  ret i32 %5
+  %0 = load <16 x i32>, ptr %x, align 4
+  %1 = load <16 x i32>, ptr %y, align 4
+  %2 = mul nsw <16 x i32> %1, %0
+  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
+  ret i32 %3
 }
 
-define i32 @mlav24i32i32(i32* %x, i32* %y) {
+define i32 @mlav24i32i32(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav24i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -1536,25 +1409,21 @@ define i32 @mlav24i32i32(i32* %x, i32* %y) {
 ; CHECK-NEXT:    vmlava.u32 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <8 x i32>*
-  %1 = load <8 x i32>, <8 x i32>* %0, align 4
-  %2 = bitcast i32* %y to <8 x i32>*
-  %3 = load <8 x i32>, <8 x i32>* %2, align 4
-  %4 = mul nsw <8 x i32> %3, %1
-  %arrayidx.8 = getelementptr inbounds i32, i32* %x, i32 8
-  %arrayidx1.8 = getelementptr inbounds i32, i32* %y, i32 8
-  %5 = bitcast i32* %arrayidx.8 to <16 x i32>*
-  %6 = load <16 x i32>, <16 x i32>* %5, align 4
-  %7 = bitcast i32* %arrayidx1.8 to <16 x i32>*
-  %8 = load <16 x i32>, <16 x i32>* %7, align 4
-  %9 = mul nsw <16 x i32> %8, %6
-  %10 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %9)
-  %11 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %4)
-  %op.rdx = add nsw i32 %10, %11
+  %0 = load <8 x i32>, ptr %x, align 4
+  %1 = load <8 x i32>, ptr %y, align 4
+  %2 = mul nsw <8 x i32> %1, %0
+  %arrayidx.8 = getelementptr inbounds i32, ptr %x, i32 8
+  %arrayidx1.8 = getelementptr inbounds i32, ptr %y, i32 8
+  %3 = load <16 x i32>, ptr %arrayidx.8, align 4
+  %4 = load <16 x i32>, ptr %arrayidx1.8, align 4
+  %5 = mul nsw <16 x i32> %4, %3
+  %6 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
+  %7 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %2)
+  %op.rdx = add nsw i32 %6, %7
   ret i32 %op.rdx
 }
 
-define i32 @mlav32i32i32(i32* %x, i32* %y) {
+define i32 @mlav32i32i32(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav32i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -1584,16 +1453,14 @@ define i32 @mlav32i32i32(i32* %x, i32* %y) {
 ; CHECK-NEXT:    vmlava.u32 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <32 x i32>*
-  %1 = load <32 x i32>, <32 x i32>* %0, align 4
-  %2 = bitcast i32* %y to <32 x i32>*
-  %3 = load <32 x i32>, <32 x i32>* %2, align 4
-  %4 = mul nsw <32 x i32> %3, %1
-  %5 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %4)
-  ret i32 %5
+  %0 = load <32 x i32>, ptr %x, align 4
+  %1 = load <32 x i32>, ptr %y, align 4
+  %2 = mul nsw <32 x i32> %1, %0
+  %3 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %2)
+  ret i32 %3
 }
 
-define i32 @mlav64i32i32(i32* %x, i32* %y) {
+define i32 @mlav64i32i32(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav64i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -1647,151 +1514,119 @@ define i32 @mlav64i32i32(i32* %x, i32* %y) {
 ; CHECK-NEXT:    vmlava.u32 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %0, align 4
-  %1 = bitcast i32* %y to <4 x i32>*
-  %wide.load10 = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = mul nsw <4 x i32> %wide.load10, %wide.load
-  %3 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %2)
-  %4 = getelementptr inbounds i32, i32* %x, i32 4
-  %5 = bitcast i32* %4 to <4 x i32>*
-  %wide.load.1 = load <4 x i32>, <4 x i32>* %5, align 4
-  %6 = getelementptr inbounds i32, i32* %y, i32 4
-  %7 = bitcast i32* %6 to <4 x i32>*
-  %wide.load10.1 = load <4 x i32>, <4 x i32>* %7, align 4
-  %8 = mul nsw <4 x i32> %wide.load10.1, %wide.load.1
-  %9 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %8)
-  %10 = add i32 %9, %3
-  %11 = getelementptr inbounds i32, i32* %x, i32 8
-  %12 = bitcast i32* %11 to <4 x i32>*
-  %wide.load.2 = load <4 x i32>, <4 x i32>* %12, align 4
-  %13 = getelementptr inbounds i32, i32* %y, i32 8
-  %14 = bitcast i32* %13 to <4 x i32>*
-  %wide.load10.2 = load <4 x i32>, <4 x i32>* %14, align 4
-  %15 = mul nsw <4 x i32> %wide.load10.2, %wide.load.2
-  %16 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %15)
-  %17 = add i32 %16, %10
-  %18 = getelementptr inbounds i32, i32* %x, i32 12
-  %19 = bitcast i32* %18 to <4 x i32>*
-  %wide.load.3 = load <4 x i32>, <4 x i32>* %19, align 4
-  %20 = getelementptr inbounds i32, i32* %y, i32 12
-  %21 = bitcast i32* %20 to <4 x i32>*
-  %wide.load10.3 = load <4 x i32>, <4 x i32>* %21, align 4
-  %22 = mul nsw <4 x i32> %wide.load10.3, %wide.load.3
-  %23 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %22)
-  %24 = add i32 %23, %17
-  %25 = getelementptr inbounds i32, i32* %x, i32 16
-  %26 = bitcast i32* %25 to <4 x i32>*
-  %wide.load.4 = load <4 x i32>, <4 x i32>* %26, align 4
-  %27 = getelementptr inbounds i32, i32* %y, i32 16
-  %28 = bitcast i32* %27 to <4 x i32>*
-  %wide.load10.4 = load <4 x i32>, <4 x i32>* %28, align 4
-  %29 = mul nsw <4 x i32> %wide.load10.4, %wide.load.4
+  %wide.load = load <4 x i32>, ptr %x, align 4
+  %wide.load10 = load <4 x i32>, ptr %y, align 4
+  %0 = mul nsw <4 x i32> %wide.load10, %wide.load
+  %1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %0)
+  %2 = getelementptr inbounds i32, ptr %x, i32 4
+  %wide.load.1 = load <4 x i32>, ptr %2, align 4
+  %3 = getelementptr inbounds i32, ptr %y, i32 4
+  %wide.load10.1 = load <4 x i32>, ptr %3, align 4
+  %4 = mul nsw <4 x i32> %wide.load10.1, %wide.load.1
+  %5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %4)
+  %6 = add i32 %5, %1
+  %7 = getelementptr inbounds i32, ptr %x, i32 8
+  %wide.load.2 = load <4 x i32>, ptr %7, align 4
+  %8 = getelementptr inbounds i32, ptr %y, i32 8
+  %wide.load10.2 = load <4 x i32>, ptr %8, align 4
+  %9 = mul nsw <4 x i32> %wide.load10.2, %wide.load.2
+  %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %9)
+  %11 = add i32 %10, %6
+  %12 = getelementptr inbounds i32, ptr %x, i32 12
+  %wide.load.3 = load <4 x i32>, ptr %12, align 4
+  %13 = getelementptr inbounds i32, ptr %y, i32 12
+  %wide.load10.3 = load <4 x i32>, ptr %13, align 4
+  %14 = mul nsw <4 x i32> %wide.load10.3, %wide.load.3
+  %15 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %14)
+  %16 = add i32 %15, %11
+  %17 = getelementptr inbounds i32, ptr %x, i32 16
+  %wide.load.4 = load <4 x i32>, ptr %17, align 4
+  %18 = getelementptr inbounds i32, ptr %y, i32 16
+  %wide.load10.4 = load <4 x i32>, ptr %18, align 4
+  %19 = mul nsw <4 x i32> %wide.load10.4, %wide.load.4
+  %20 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %19)
+  %21 = add i32 %20, %16
+  %22 = getelementptr inbounds i32, ptr %x, i32 20
+  %wide.load.5 = load <4 x i32>, ptr %22, align 4
+  %23 = getelementptr inbounds i32, ptr %y, i32 20
+  %wide.load10.5 = load <4 x i32>, ptr %23, align 4
+  %24 = mul nsw <4 x i32> %wide.load10.5, %wide.load.5
+  %25 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %24)
+  %26 = add i32 %25, %21
+  %27 = getelementptr inbounds i32, ptr %x, i32 24
+  %wide.load.6 = load <4 x i32>, ptr %27, align 4
+  %28 = getelementptr inbounds i32, ptr %y, i32 24
+  %wide.load10.6 = load <4 x i32>, ptr %28, align 4
+  %29 = mul nsw <4 x i32> %wide.load10.6, %wide.load.6
   %30 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %29)
-  %31 = add i32 %30, %24
-  %32 = getelementptr inbounds i32, i32* %x, i32 20
-  %33 = bitcast i32* %32 to <4 x i32>*
-  %wide.load.5 = load <4 x i32>, <4 x i32>* %33, align 4
-  %34 = getelementptr inbounds i32, i32* %y, i32 20
-  %35 = bitcast i32* %34 to <4 x i32>*
-  %wide.load10.5 = load <4 x i32>, <4 x i32>* %35, align 4
-  %36 = mul nsw <4 x i32> %wide.load10.5, %wide.load.5
-  %37 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %36)
-  %38 = add i32 %37, %31
-  %39 = getelementptr inbounds i32, i32* %x, i32 24
-  %40 = bitcast i32* %39 to <4 x i32>*
-  %wide.load.6 = load <4 x i32>, <4 x i32>* %40, align 4
-  %41 = getelementptr inbounds i32, i32* %y, i32 24
-  %42 = bitcast i32* %41 to <4 x i32>*
-  %wide.load10.6 = load <4 x i32>, <4 x i32>* %42, align 4
-  %43 = mul nsw <4 x i32> %wide.load10.6, %wide.load.6
-  %44 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %43)
-  %45 = add i32 %44, %38
-  %46 = getelementptr inbounds i32, i32* %x, i32 28
-  %47 = bitcast i32* %46 to <4 x i32>*
-  %wide.load.7 = load <4 x i32>, <4 x i32>* %47, align 4
-  %48 = getelementptr inbounds i32, i32* %y, i32 28
-  %49 = bitcast i32* %48 to <4 x i32>*
-  %wide.load10.7 = load <4 x i32>, <4 x i32>* %49, align 4
-  %50 = mul nsw <4 x i32> %wide.load10.7, %wide.load.7
-  %51 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %50)
-  %52 = add i32 %51, %45
-  %53 = getelementptr inbounds i32, i32* %x, i32 32
-  %54 = bitcast i32* %53 to <4 x i32>*
-  %wide.load.8 = load <4 x i32>, <4 x i32>* %54, align 4
-  %55 = getelementptr inbounds i32, i32* %y, i32 32
-  %56 = bitcast i32* %55 to <4 x i32>*
-  %wide.load10.8 = load <4 x i32>, <4 x i32>* %56, align 4
-  %57 = mul nsw <4 x i32> %wide.load10.8, %wide.load.8
-  %58 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %57)
-  %59 = add i32 %58, %52
-  %60 = getelementptr inbounds i32, i32* %x, i32 36
-  %61 = bitcast i32* %60 to <4 x i32>*
-  %wide.load.9 = load <4 x i32>, <4 x i32>* %61, align 4
-  %62 = getelementptr inbounds i32, i32* %y, i32 36
-  %63 = bitcast i32* %62 to <4 x i32>*
-  %wide.load10.9 = load <4 x i32>, <4 x i32>* %63, align 4
-  %64 = mul nsw <4 x i32> %wide.load10.9, %wide.load.9
+  %31 = add i32 %30, %26
+  %32 = getelementptr inbounds i32, ptr %x, i32 28
+  %wide.load.7 = load <4 x i32>, ptr %32, align 4
+  %33 = getelementptr inbounds i32, ptr %y, i32 28
+  %wide.load10.7 = load <4 x i32>, ptr %33, align 4
+  %34 = mul nsw <4 x i32> %wide.load10.7, %wide.load.7
+  %35 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %34)
+  %36 = add i32 %35, %31
+  %37 = getelementptr inbounds i32, ptr %x, i32 32
+  %wide.load.8 = load <4 x i32>, ptr %37, align 4
+  %38 = getelementptr inbounds i32, ptr %y, i32 32
+  %wide.load10.8 = load <4 x i32>, ptr %38, align 4
+  %39 = mul nsw <4 x i32> %wide.load10.8, %wide.load.8
+  %40 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %39)
+  %41 = add i32 %40, %36
+  %42 = getelementptr inbounds i32, ptr %x, i32 36
+  %wide.load.9 = load <4 x i32>, ptr %42, align 4
+  %43 = getelementptr inbounds i32, ptr %y, i32 36
+  %wide.load10.9 = load <4 x i32>, ptr %43, align 4
+  %44 = mul nsw <4 x i32> %wide.load10.9, %wide.load.9
+  %45 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %44)
+  %46 = add i32 %45, %41
+  %47 = getelementptr inbounds i32, ptr %x, i32 40
+  %wide.load.10 = load <4 x i32>, ptr %47, align 4
+  %48 = getelementptr inbounds i32, ptr %y, i32 40
+  %wide.load10.10 = load <4 x i32>, ptr %48, align 4
+  %49 = mul nsw <4 x i32> %wide.load10.10, %wide.load.10
+  %50 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %49)
+  %51 = add i32 %50, %46
+  %52 = getelementptr inbounds i32, ptr %x, i32 44
+  %wide.load.11 = load <4 x i32>, ptr %52, align 4
+  %53 = getelementptr inbounds i32, ptr %y, i32 44
+  %wide.load10.11 = load <4 x i32>, ptr %53, align 4
+  %54 = mul nsw <4 x i32> %wide.load10.11, %wide.load.11
+  %55 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %54)
+  %56 = add i32 %55, %51
+  %57 = getelementptr inbounds i32, ptr %x, i32 48
+  %wide.load.12 = load <4 x i32>, ptr %57, align 4
+  %58 = getelementptr inbounds i32, ptr %y, i32 48
+  %wide.load10.12 = load <4 x i32>, ptr %58, align 4
+  %59 = mul nsw <4 x i32> %wide.load10.12, %wide.load.12
+  %60 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %59)
+  %61 = add i32 %60, %56
+  %62 = getelementptr inbounds i32, ptr %x, i32 52
+  %wide.load.13 = load <4 x i32>, ptr %62, align 4
+  %63 = getelementptr inbounds i32, ptr %y, i32 52
+  %wide.load10.13 = load <4 x i32>, ptr %63, align 4
+  %64 = mul nsw <4 x i32> %wide.load10.13, %wide.load.13
   %65 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %64)
-  %66 = add i32 %65, %59
-  %67 = getelementptr inbounds i32, i32* %x, i32 40
-  %68 = bitcast i32* %67 to <4 x i32>*
-  %wide.load.10 = load <4 x i32>, <4 x i32>* %68, align 4
-  %69 = getelementptr inbounds i32, i32* %y, i32 40
-  %70 = bitcast i32* %69 to <4 x i32>*
-  %wide.load10.10 = load <4 x i32>, <4 x i32>* %70, align 4
-  %71 = mul nsw <4 x i32> %wide.load10.10, %wide.load.10
-  %72 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %71)
-  %73 = add i32 %72, %66
-  %74 = getelementptr inbounds i32, i32* %x, i32 44
-  %75 = bitcast i32* %74 to <4 x i32>*
-  %wide.load.11 = load <4 x i32>, <4 x i32>* %75, align 4
-  %76 = getelementptr inbounds i32, i32* %y, i32 44
-  %77 = bitcast i32* %76 to <4 x i32>*
-  %wide.load10.11 = load <4 x i32>, <4 x i32>* %77, align 4
-  %78 = mul nsw <4 x i32> %wide.load10.11, %wide.load.11
-  %79 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %78)
-  %80 = add i32 %79, %73
-  %81 = getelementptr inbounds i32, i32* %x, i32 48
-  %82 = bitcast i32* %81 to <4 x i32>*
-  %wide.load.12 = load <4 x i32>, <4 x i32>* %82, align 4
-  %83 = getelementptr inbounds i32, i32* %y, i32 48
-  %84 = bitcast i32* %83 to <4 x i32>*
-  %wide.load10.12 = load <4 x i32>, <4 x i32>* %84, align 4
-  %85 = mul nsw <4 x i32> %wide.load10.12, %wide.load.12
-  %86 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %85)
-  %87 = add i32 %86, %80
-  %88 = getelementptr inbounds i32, i32* %x, i32 52
-  %89 = bitcast i32* %88 to <4 x i32>*
-  %wide.load.13 = load <4 x i32>, <4 x i32>* %89, align 4
-  %90 = getelementptr inbounds i32, i32* %y, i32 52
-  %91 = bitcast i32* %90 to <4 x i32>*
-  %wide.load10.13 = load <4 x i32>, <4 x i32>* %91, align 4
-  %92 = mul nsw <4 x i32> %wide.load10.13, %wide.load.13
-  %93 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %92)
-  %94 = add i32 %93, %87
-  %95 = getelementptr inbounds i32, i32* %x, i32 56
-  %96 = bitcast i32* %95 to <4 x i32>*
-  %wide.load.14 = load <4 x i32>, <4 x i32>* %96, align 4
-  %97 = getelementptr inbounds i32, i32* %y, i32 56
-  %98 = bitcast i32* %97 to <4 x i32>*
-  %wide.load10.14 = load <4 x i32>, <4 x i32>* %98, align 4
-  %99 = mul nsw <4 x i32> %wide.load10.14, %wide.load.14
-  %100 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %99)
-  %101 = add i32 %100, %94
-  %102 = getelementptr inbounds i32, i32* %x, i32 60
-  %103 = bitcast i32* %102 to <4 x i32>*
-  %wide.load.15 = load <4 x i32>, <4 x i32>* %103, align 4
-  %104 = getelementptr inbounds i32, i32* %y, i32 60
-  %105 = bitcast i32* %104 to <4 x i32>*
-  %wide.load10.15 = load <4 x i32>, <4 x i32>* %105, align 4
-  %106 = mul nsw <4 x i32> %wide.load10.15, %wide.load.15
-  %107 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %106)
-  %108 = add i32 %107, %101
-  ret i32 %108
-}
-
-define i32 @mlav128i32i32(i32* %x, i32* %y) {
+  %66 = add i32 %65, %61
+  %67 = getelementptr inbounds i32, ptr %x, i32 56
+  %wide.load.14 = load <4 x i32>, ptr %67, align 4
+  %68 = getelementptr inbounds i32, ptr %y, i32 56
+  %wide.load10.14 = load <4 x i32>, ptr %68, align 4
+  %69 = mul nsw <4 x i32> %wide.load10.14, %wide.load.14
+  %70 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %69)
+  %71 = add i32 %70, %66
+  %72 = getelementptr inbounds i32, ptr %x, i32 60
+  %wide.load.15 = load <4 x i32>, ptr %72, align 4
+  %73 = getelementptr inbounds i32, ptr %y, i32 60
+  %wide.load10.15 = load <4 x i32>, ptr %73, align 4
+  %74 = mul nsw <4 x i32> %wide.load10.15, %wide.load.15
+  %75 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %74)
+  %76 = add i32 %75, %71
+  ret i32 %76
+}
+
+define i32 @mlav128i32i32(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav128i32i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -1893,295 +1728,231 @@ define i32 @mlav128i32i32(i32* %x, i32* %y) {
 ; CHECK-NEXT:    vmlava.u32 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i32* %x to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %0, align 4
-  %1 = bitcast i32* %y to <4 x i32>*
-  %wide.load10 = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = mul nsw <4 x i32> %wide.load10, %wide.load
-  %3 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %2)
-  %4 = getelementptr inbounds i32, i32* %x, i32 4
-  %5 = bitcast i32* %4 to <4 x i32>*
-  %wide.load.1 = load <4 x i32>, <4 x i32>* %5, align 4
-  %6 = getelementptr inbounds i32, i32* %y, i32 4
-  %7 = bitcast i32* %6 to <4 x i32>*
-  %wide.load10.1 = load <4 x i32>, <4 x i32>* %7, align 4
-  %8 = mul nsw <4 x i32> %wide.load10.1, %wide.load.1
-  %9 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %8)
-  %10 = add i32 %9, %3
-  %11 = getelementptr inbounds i32, i32* %x, i32 8
-  %12 = bitcast i32* %11 to <4 x i32>*
-  %wide.load.2 = load <4 x i32>, <4 x i32>* %12, align 4
-  %13 = getelementptr inbounds i32, i32* %y, i32 8
-  %14 = bitcast i32* %13 to <4 x i32>*
-  %wide.load10.2 = load <4 x i32>, <4 x i32>* %14, align 4
-  %15 = mul nsw <4 x i32> %wide.load10.2, %wide.load.2
-  %16 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %15)
-  %17 = add i32 %16, %10
-  %18 = getelementptr inbounds i32, i32* %x, i32 12
-  %19 = bitcast i32* %18 to <4 x i32>*
-  %wide.load.3 = load <4 x i32>, <4 x i32>* %19, align 4
-  %20 = getelementptr inbounds i32, i32* %y, i32 12
-  %21 = bitcast i32* %20 to <4 x i32>*
-  %wide.load10.3 = load <4 x i32>, <4 x i32>* %21, align 4
-  %22 = mul nsw <4 x i32> %wide.load10.3, %wide.load.3
-  %23 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %22)
-  %24 = add i32 %23, %17
-  %25 = getelementptr inbounds i32, i32* %x, i32 16
-  %26 = bitcast i32* %25 to <4 x i32>*
-  %wide.load.4 = load <4 x i32>, <4 x i32>* %26, align 4
-  %27 = getelementptr inbounds i32, i32* %y, i32 16
-  %28 = bitcast i32* %27 to <4 x i32>*
-  %wide.load10.4 = load <4 x i32>, <4 x i32>* %28, align 4
-  %29 = mul nsw <4 x i32> %wide.load10.4, %wide.load.4
+  %wide.load = load <4 x i32>, ptr %x, align 4
+  %wide.load10 = load <4 x i32>, ptr %y, align 4
+  %0 = mul nsw <4 x i32> %wide.load10, %wide.load
+  %1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %0)
+  %2 = getelementptr inbounds i32, ptr %x, i32 4
+  %wide.load.1 = load <4 x i32>, ptr %2, align 4
+  %3 = getelementptr inbounds i32, ptr %y, i32 4
+  %wide.load10.1 = load <4 x i32>, ptr %3, align 4
+  %4 = mul nsw <4 x i32> %wide.load10.1, %wide.load.1
+  %5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %4)
+  %6 = add i32 %5, %1
+  %7 = getelementptr inbounds i32, ptr %x, i32 8
+  %wide.load.2 = load <4 x i32>, ptr %7, align 4
+  %8 = getelementptr inbounds i32, ptr %y, i32 8
+  %wide.load10.2 = load <4 x i32>, ptr %8, align 4
+  %9 = mul nsw <4 x i32> %wide.load10.2, %wide.load.2
+  %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %9)
+  %11 = add i32 %10, %6
+  %12 = getelementptr inbounds i32, ptr %x, i32 12
+  %wide.load.3 = load <4 x i32>, ptr %12, align 4
+  %13 = getelementptr inbounds i32, ptr %y, i32 12
+  %wide.load10.3 = load <4 x i32>, ptr %13, align 4
+  %14 = mul nsw <4 x i32> %wide.load10.3, %wide.load.3
+  %15 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %14)
+  %16 = add i32 %15, %11
+  %17 = getelementptr inbounds i32, ptr %x, i32 16
+  %wide.load.4 = load <4 x i32>, ptr %17, align 4
+  %18 = getelementptr inbounds i32, ptr %y, i32 16
+  %wide.load10.4 = load <4 x i32>, ptr %18, align 4
+  %19 = mul nsw <4 x i32> %wide.load10.4, %wide.load.4
+  %20 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %19)
+  %21 = add i32 %20, %16
+  %22 = getelementptr inbounds i32, ptr %x, i32 20
+  %wide.load.5 = load <4 x i32>, ptr %22, align 4
+  %23 = getelementptr inbounds i32, ptr %y, i32 20
+  %wide.load10.5 = load <4 x i32>, ptr %23, align 4
+  %24 = mul nsw <4 x i32> %wide.load10.5, %wide.load.5
+  %25 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %24)
+  %26 = add i32 %25, %21
+  %27 = getelementptr inbounds i32, ptr %x, i32 24
+  %wide.load.6 = load <4 x i32>, ptr %27, align 4
+  %28 = getelementptr inbounds i32, ptr %y, i32 24
+  %wide.load10.6 = load <4 x i32>, ptr %28, align 4
+  %29 = mul nsw <4 x i32> %wide.load10.6, %wide.load.6
   %30 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %29)
-  %31 = add i32 %30, %24
-  %32 = getelementptr inbounds i32, i32* %x, i32 20
-  %33 = bitcast i32* %32 to <4 x i32>*
-  %wide.load.5 = load <4 x i32>, <4 x i32>* %33, align 4
-  %34 = getelementptr inbounds i32, i32* %y, i32 20
-  %35 = bitcast i32* %34 to <4 x i32>*
-  %wide.load10.5 = load <4 x i32>, <4 x i32>* %35, align 4
-  %36 = mul nsw <4 x i32> %wide.load10.5, %wide.load.5
-  %37 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %36)
-  %38 = add i32 %37, %31
-  %39 = getelementptr inbounds i32, i32* %x, i32 24
-  %40 = bitcast i32* %39 to <4 x i32>*
-  %wide.load.6 = load <4 x i32>, <4 x i32>* %40, align 4
-  %41 = getelementptr inbounds i32, i32* %y, i32 24
-  %42 = bitcast i32* %41 to <4 x i32>*
-  %wide.load10.6 = load <4 x i32>, <4 x i32>* %42, align 4
-  %43 = mul nsw <4 x i32> %wide.load10.6, %wide.load.6
-  %44 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %43)
-  %45 = add i32 %44, %38
-  %46 = getelementptr inbounds i32, i32* %x, i32 28
-  %47 = bitcast i32* %46 to <4 x i32>*
-  %wide.load.7 = load <4 x i32>, <4 x i32>* %47, align 4
-  %48 = getelementptr inbounds i32, i32* %y, i32 28
-  %49 = bitcast i32* %48 to <4 x i32>*
-  %wide.load10.7 = load <4 x i32>, <4 x i32>* %49, align 4
-  %50 = mul nsw <4 x i32> %wide.load10.7, %wide.load.7
-  %51 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %50)
-  %52 = add i32 %51, %45
-  %53 = getelementptr inbounds i32, i32* %x, i32 32
-  %54 = bitcast i32* %53 to <4 x i32>*
-  %wide.load.8 = load <4 x i32>, <4 x i32>* %54, align 4
-  %55 = getelementptr inbounds i32, i32* %y, i32 32
-  %56 = bitcast i32* %55 to <4 x i32>*
-  %wide.load10.8 = load <4 x i32>, <4 x i32>* %56, align 4
-  %57 = mul nsw <4 x i32> %wide.load10.8, %wide.load.8
-  %58 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %57)
-  %59 = add i32 %58, %52
-  %60 = getelementptr inbounds i32, i32* %x, i32 36
-  %61 = bitcast i32* %60 to <4 x i32>*
-  %wide.load.9 = load <4 x i32>, <4 x i32>* %61, align 4
-  %62 = getelementptr inbounds i32, i32* %y, i32 36
-  %63 = bitcast i32* %62 to <4 x i32>*
-  %wide.load10.9 = load <4 x i32>, <4 x i32>* %63, align 4
-  %64 = mul nsw <4 x i32> %wide.load10.9, %wide.load.9
+  %31 = add i32 %30, %26
+  %32 = getelementptr inbounds i32, ptr %x, i32 28
+  %wide.load.7 = load <4 x i32>, ptr %32, align 4
+  %33 = getelementptr inbounds i32, ptr %y, i32 28
+  %wide.load10.7 = load <4 x i32>, ptr %33, align 4
+  %34 = mul nsw <4 x i32> %wide.load10.7, %wide.load.7
+  %35 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %34)
+  %36 = add i32 %35, %31
+  %37 = getelementptr inbounds i32, ptr %x, i32 32
+  %wide.load.8 = load <4 x i32>, ptr %37, align 4
+  %38 = getelementptr inbounds i32, ptr %y, i32 32
+  %wide.load10.8 = load <4 x i32>, ptr %38, align 4
+  %39 = mul nsw <4 x i32> %wide.load10.8, %wide.load.8
+  %40 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %39)
+  %41 = add i32 %40, %36
+  %42 = getelementptr inbounds i32, ptr %x, i32 36
+  %wide.load.9 = load <4 x i32>, ptr %42, align 4
+  %43 = getelementptr inbounds i32, ptr %y, i32 36
+  %wide.load10.9 = load <4 x i32>, ptr %43, align 4
+  %44 = mul nsw <4 x i32> %wide.load10.9, %wide.load.9
+  %45 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %44)
+  %46 = add i32 %45, %41
+  %47 = getelementptr inbounds i32, ptr %x, i32 40
+  %wide.load.10 = load <4 x i32>, ptr %47, align 4
+  %48 = getelementptr inbounds i32, ptr %y, i32 40
+  %wide.load10.10 = load <4 x i32>, ptr %48, align 4
+  %49 = mul nsw <4 x i32> %wide.load10.10, %wide.load.10
+  %50 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %49)
+  %51 = add i32 %50, %46
+  %52 = getelementptr inbounds i32, ptr %x, i32 44
+  %wide.load.11 = load <4 x i32>, ptr %52, align 4
+  %53 = getelementptr inbounds i32, ptr %y, i32 44
+  %wide.load10.11 = load <4 x i32>, ptr %53, align 4
+  %54 = mul nsw <4 x i32> %wide.load10.11, %wide.load.11
+  %55 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %54)
+  %56 = add i32 %55, %51
+  %57 = getelementptr inbounds i32, ptr %x, i32 48
+  %wide.load.12 = load <4 x i32>, ptr %57, align 4
+  %58 = getelementptr inbounds i32, ptr %y, i32 48
+  %wide.load10.12 = load <4 x i32>, ptr %58, align 4
+  %59 = mul nsw <4 x i32> %wide.load10.12, %wide.load.12
+  %60 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %59)
+  %61 = add i32 %60, %56
+  %62 = getelementptr inbounds i32, ptr %x, i32 52
+  %wide.load.13 = load <4 x i32>, ptr %62, align 4
+  %63 = getelementptr inbounds i32, ptr %y, i32 52
+  %wide.load10.13 = load <4 x i32>, ptr %63, align 4
+  %64 = mul nsw <4 x i32> %wide.load10.13, %wide.load.13
   %65 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %64)
-  %66 = add i32 %65, %59
-  %67 = getelementptr inbounds i32, i32* %x, i32 40
-  %68 = bitcast i32* %67 to <4 x i32>*
-  %wide.load.10 = load <4 x i32>, <4 x i32>* %68, align 4
-  %69 = getelementptr inbounds i32, i32* %y, i32 40
-  %70 = bitcast i32* %69 to <4 x i32>*
-  %wide.load10.10 = load <4 x i32>, <4 x i32>* %70, align 4
-  %71 = mul nsw <4 x i32> %wide.load10.10, %wide.load.10
-  %72 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %71)
-  %73 = add i32 %72, %66
-  %74 = getelementptr inbounds i32, i32* %x, i32 44
-  %75 = bitcast i32* %74 to <4 x i32>*
-  %wide.load.11 = load <4 x i32>, <4 x i32>* %75, align 4
-  %76 = getelementptr inbounds i32, i32* %y, i32 44
-  %77 = bitcast i32* %76 to <4 x i32>*
-  %wide.load10.11 = load <4 x i32>, <4 x i32>* %77, align 4
-  %78 = mul nsw <4 x i32> %wide.load10.11, %wide.load.11
-  %79 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %78)
-  %80 = add i32 %79, %73
-  %81 = getelementptr inbounds i32, i32* %x, i32 48
-  %82 = bitcast i32* %81 to <4 x i32>*
-  %wide.load.12 = load <4 x i32>, <4 x i32>* %82, align 4
-  %83 = getelementptr inbounds i32, i32* %y, i32 48
-  %84 = bitcast i32* %83 to <4 x i32>*
-  %wide.load10.12 = load <4 x i32>, <4 x i32>* %84, align 4
-  %85 = mul nsw <4 x i32> %wide.load10.12, %wide.load.12
-  %86 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %85)
-  %87 = add i32 %86, %80
-  %88 = getelementptr inbounds i32, i32* %x, i32 52
-  %89 = bitcast i32* %88 to <4 x i32>*
-  %wide.load.13 = load <4 x i32>, <4 x i32>* %89, align 4
-  %90 = getelementptr inbounds i32, i32* %y, i32 52
-  %91 = bitcast i32* %90 to <4 x i32>*
-  %wide.load10.13 = load <4 x i32>, <4 x i32>* %91, align 4
-  %92 = mul nsw <4 x i32> %wide.load10.13, %wide.load.13
-  %93 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %92)
-  %94 = add i32 %93, %87
-  %95 = getelementptr inbounds i32, i32* %x, i32 56
-  %96 = bitcast i32* %95 to <4 x i32>*
-  %wide.load.14 = load <4 x i32>, <4 x i32>* %96, align 4
-  %97 = getelementptr inbounds i32, i32* %y, i32 56
-  %98 = bitcast i32* %97 to <4 x i32>*
-  %wide.load10.14 = load <4 x i32>, <4 x i32>* %98, align 4
-  %99 = mul nsw <4 x i32> %wide.load10.14, %wide.load.14
+  %66 = add i32 %65, %61
+  %67 = getelementptr inbounds i32, ptr %x, i32 56
+  %wide.load.14 = load <4 x i32>, ptr %67, align 4
+  %68 = getelementptr inbounds i32, ptr %y, i32 56
+  %wide.load10.14 = load <4 x i32>, ptr %68, align 4
+  %69 = mul nsw <4 x i32> %wide.load10.14, %wide.load.14
+  %70 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %69)
+  %71 = add i32 %70, %66
+  %72 = getelementptr inbounds i32, ptr %x, i32 60
+  %wide.load.15 = load <4 x i32>, ptr %72, align 4
+  %73 = getelementptr inbounds i32, ptr %y, i32 60
+  %wide.load10.15 = load <4 x i32>, ptr %73, align 4
+  %74 = mul nsw <4 x i32> %wide.load10.15, %wide.load.15
+  %75 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %74)
+  %76 = add i32 %75, %71
+  %77 = getelementptr inbounds i32, ptr %x, i32 64
+  %wide.load.16 = load <4 x i32>, ptr %77, align 4
+  %78 = getelementptr inbounds i32, ptr %y, i32 64
+  %wide.load10.16 = load <4 x i32>, ptr %78, align 4
+  %79 = mul nsw <4 x i32> %wide.load10.16, %wide.load.16
+  %80 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %79)
+  %81 = add i32 %80, %76
+  %82 = getelementptr inbounds i32, ptr %x, i32 68
+  %wide.load.17 = load <4 x i32>, ptr %82, align 4
+  %83 = getelementptr inbounds i32, ptr %y, i32 68
+  %wide.load10.17 = load <4 x i32>, ptr %83, align 4
+  %84 = mul nsw <4 x i32> %wide.load10.17, %wide.load.17
+  %85 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %84)
+  %86 = add i32 %85, %81
+  %87 = getelementptr inbounds i32, ptr %x, i32 72
+  %wide.load.18 = load <4 x i32>, ptr %87, align 4
+  %88 = getelementptr inbounds i32, ptr %y, i32 72
+  %wide.load10.18 = load <4 x i32>, ptr %88, align 4
+  %89 = mul nsw <4 x i32> %wide.load10.18, %wide.load.18
+  %90 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %89)
+  %91 = add i32 %90, %86
+  %92 = getelementptr inbounds i32, ptr %x, i32 76
+  %wide.load.19 = load <4 x i32>, ptr %92, align 4
+  %93 = getelementptr inbounds i32, ptr %y, i32 76
+  %wide.load10.19 = load <4 x i32>, ptr %93, align 4
+  %94 = mul nsw <4 x i32> %wide.load10.19, %wide.load.19
+  %95 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %94)
+  %96 = add i32 %95, %91
+  %97 = getelementptr inbounds i32, ptr %x, i32 80
+  %wide.load.20 = load <4 x i32>, ptr %97, align 4
+  %98 = getelementptr inbounds i32, ptr %y, i32 80
+  %wide.load10.20 = load <4 x i32>, ptr %98, align 4
+  %99 = mul nsw <4 x i32> %wide.load10.20, %wide.load.20
   %100 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %99)
-  %101 = add i32 %100, %94
-  %102 = getelementptr inbounds i32, i32* %x, i32 60
-  %103 = bitcast i32* %102 to <4 x i32>*
-  %wide.load.15 = load <4 x i32>, <4 x i32>* %103, align 4
-  %104 = getelementptr inbounds i32, i32* %y, i32 60
-  %105 = bitcast i32* %104 to <4 x i32>*
-  %wide.load10.15 = load <4 x i32>, <4 x i32>* %105, align 4
-  %106 = mul nsw <4 x i32> %wide.load10.15, %wide.load.15
-  %107 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %106)
-  %108 = add i32 %107, %101
-  %109 = getelementptr inbounds i32, i32* %x, i32 64
-  %110 = bitcast i32* %109 to <4 x i32>*
-  %wide.load.16 = load <4 x i32>, <4 x i32>* %110, align 4
-  %111 = getelementptr inbounds i32, i32* %y, i32 64
-  %112 = bitcast i32* %111 to <4 x i32>*
-  %wide.load10.16 = load <4 x i32>, <4 x i32>* %112, align 4
-  %113 = mul nsw <4 x i32> %wide.load10.16, %wide.load.16
-  %114 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %113)
-  %115 = add i32 %114, %108
-  %116 = getelementptr inbounds i32, i32* %x, i32 68
-  %117 = bitcast i32* %116 to <4 x i32>*
-  %wide.load.17 = load <4 x i32>, <4 x i32>* %117, align 4
-  %118 = getelementptr inbounds i32, i32* %y, i32 68
-  %119 = bitcast i32* %118 to <4 x i32>*
-  %wide.load10.17 = load <4 x i32>, <4 x i32>* %119, align 4
-  %120 = mul nsw <4 x i32> %wide.load10.17, %wide.load.17
-  %121 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %120)
-  %122 = add i32 %121, %115
-  %123 = getelementptr inbounds i32, i32* %x, i32 72
-  %124 = bitcast i32* %123 to <4 x i32>*
-  %wide.load.18 = load <4 x i32>, <4 x i32>* %124, align 4
-  %125 = getelementptr inbounds i32, i32* %y, i32 72
-  %126 = bitcast i32* %125 to <4 x i32>*
-  %wide.load10.18 = load <4 x i32>, <4 x i32>* %126, align 4
-  %127 = mul nsw <4 x i32> %wide.load10.18, %wide.load.18
-  %128 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %127)
-  %129 = add i32 %128, %122
-  %130 = getelementptr inbounds i32, i32* %x, i32 76
-  %131 = bitcast i32* %130 to <4 x i32>*
-  %wide.load.19 = load <4 x i32>, <4 x i32>* %131, align 4
-  %132 = getelementptr inbounds i32, i32* %y, i32 76
-  %133 = bitcast i32* %132 to <4 x i32>*
-  %wide.load10.19 = load <4 x i32>, <4 x i32>* %133, align 4
-  %134 = mul nsw <4 x i32> %wide.load10.19, %wide.load.19
+  %101 = add i32 %100, %96
+  %102 = getelementptr inbounds i32, ptr %x, i32 84
+  %wide.load.21 = load <4 x i32>, ptr %102, align 4
+  %103 = getelementptr inbounds i32, ptr %y, i32 84
+  %wide.load10.21 = load <4 x i32>, ptr %103, align 4
+  %104 = mul nsw <4 x i32> %wide.load10.21, %wide.load.21
+  %105 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %104)
+  %106 = add i32 %105, %101
+  %107 = getelementptr inbounds i32, ptr %x, i32 88
+  %wide.load.22 = load <4 x i32>, ptr %107, align 4
+  %108 = getelementptr inbounds i32, ptr %y, i32 88
+  %wide.load10.22 = load <4 x i32>, ptr %108, align 4
+  %109 = mul nsw <4 x i32> %wide.load10.22, %wide.load.22
+  %110 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %109)
+  %111 = add i32 %110, %106
+  %112 = getelementptr inbounds i32, ptr %x, i32 92
+  %wide.load.23 = load <4 x i32>, ptr %112, align 4
+  %113 = getelementptr inbounds i32, ptr %y, i32 92
+  %wide.load10.23 = load <4 x i32>, ptr %113, align 4
+  %114 = mul nsw <4 x i32> %wide.load10.23, %wide.load.23
+  %115 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %114)
+  %116 = add i32 %115, %111
+  %117 = getelementptr inbounds i32, ptr %x, i32 96
+  %wide.load.24 = load <4 x i32>, ptr %117, align 4
+  %118 = getelementptr inbounds i32, ptr %y, i32 96
+  %wide.load10.24 = load <4 x i32>, ptr %118, align 4
+  %119 = mul nsw <4 x i32> %wide.load10.24, %wide.load.24
+  %120 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %119)
+  %121 = add i32 %120, %116
+  %122 = getelementptr inbounds i32, ptr %x, i32 100
+  %wide.load.25 = load <4 x i32>, ptr %122, align 4
+  %123 = getelementptr inbounds i32, ptr %y, i32 100
+  %wide.load10.25 = load <4 x i32>, ptr %123, align 4
+  %124 = mul nsw <4 x i32> %wide.load10.25, %wide.load.25
+  %125 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %124)
+  %126 = add i32 %125, %121
+  %127 = getelementptr inbounds i32, ptr %x, i32 104
+  %wide.load.26 = load <4 x i32>, ptr %127, align 4
+  %128 = getelementptr inbounds i32, ptr %y, i32 104
+  %wide.load10.26 = load <4 x i32>, ptr %128, align 4
+  %129 = mul nsw <4 x i32> %wide.load10.26, %wide.load.26
+  %130 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %129)
+  %131 = add i32 %130, %126
+  %132 = getelementptr inbounds i32, ptr %x, i32 108
+  %wide.load.27 = load <4 x i32>, ptr %132, align 4
+  %133 = getelementptr inbounds i32, ptr %y, i32 108
+  %wide.load10.27 = load <4 x i32>, ptr %133, align 4
+  %134 = mul nsw <4 x i32> %wide.load10.27, %wide.load.27
   %135 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %134)
-  %136 = add i32 %135, %129
-  %137 = getelementptr inbounds i32, i32* %x, i32 80
-  %138 = bitcast i32* %137 to <4 x i32>*
-  %wide.load.20 = load <4 x i32>, <4 x i32>* %138, align 4
-  %139 = getelementptr inbounds i32, i32* %y, i32 80
-  %140 = bitcast i32* %139 to <4 x i32>*
-  %wide.load10.20 = load <4 x i32>, <4 x i32>* %140, align 4
-  %141 = mul nsw <4 x i32> %wide.load10.20, %wide.load.20
-  %142 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %141)
-  %143 = add i32 %142, %136
-  %144 = getelementptr inbounds i32, i32* %x, i32 84
-  %145 = bitcast i32* %144 to <4 x i32>*
-  %wide.load.21 = load <4 x i32>, <4 x i32>* %145, align 4
-  %146 = getelementptr inbounds i32, i32* %y, i32 84
-  %147 = bitcast i32* %146 to <4 x i32>*
-  %wide.load10.21 = load <4 x i32>, <4 x i32>* %147, align 4
-  %148 = mul nsw <4 x i32> %wide.load10.21, %wide.load.21
-  %149 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %148)
-  %150 = add i32 %149, %143
-  %151 = getelementptr inbounds i32, i32* %x, i32 88
-  %152 = bitcast i32* %151 to <4 x i32>*
-  %wide.load.22 = load <4 x i32>, <4 x i32>* %152, align 4
-  %153 = getelementptr inbounds i32, i32* %y, i32 88
-  %154 = bitcast i32* %153 to <4 x i32>*
-  %wide.load10.22 = load <4 x i32>, <4 x i32>* %154, align 4
-  %155 = mul nsw <4 x i32> %wide.load10.22, %wide.load.22
-  %156 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %155)
-  %157 = add i32 %156, %150
-  %158 = getelementptr inbounds i32, i32* %x, i32 92
-  %159 = bitcast i32* %158 to <4 x i32>*
-  %wide.load.23 = load <4 x i32>, <4 x i32>* %159, align 4
-  %160 = getelementptr inbounds i32, i32* %y, i32 92
-  %161 = bitcast i32* %160 to <4 x i32>*
-  %wide.load10.23 = load <4 x i32>, <4 x i32>* %161, align 4
-  %162 = mul nsw <4 x i32> %wide.load10.23, %wide.load.23
-  %163 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %162)
-  %164 = add i32 %163, %157
-  %165 = getelementptr inbounds i32, i32* %x, i32 96
-  %166 = bitcast i32* %165 to <4 x i32>*
-  %wide.load.24 = load <4 x i32>, <4 x i32>* %166, align 4
-  %167 = getelementptr inbounds i32, i32* %y, i32 96
-  %168 = bitcast i32* %167 to <4 x i32>*
-  %wide.load10.24 = load <4 x i32>, <4 x i32>* %168, align 4
-  %169 = mul nsw <4 x i32> %wide.load10.24, %wide.load.24
-  %170 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %169)
-  %171 = add i32 %170, %164
-  %172 = getelementptr inbounds i32, i32* %x, i32 100
-  %173 = bitcast i32* %172 to <4 x i32>*
-  %wide.load.25 = load <4 x i32>, <4 x i32>* %173, align 4
-  %174 = getelementptr inbounds i32, i32* %y, i32 100
-  %175 = bitcast i32* %174 to <4 x i32>*
-  %wide.load10.25 = load <4 x i32>, <4 x i32>* %175, align 4
-  %176 = mul nsw <4 x i32> %wide.load10.25, %wide.load.25
-  %177 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %176)
-  %178 = add i32 %177, %171
-  %179 = getelementptr inbounds i32, i32* %x, i32 104
-  %180 = bitcast i32* %179 to <4 x i32>*
-  %wide.load.26 = load <4 x i32>, <4 x i32>* %180, align 4
-  %181 = getelementptr inbounds i32, i32* %y, i32 104
-  %182 = bitcast i32* %181 to <4 x i32>*
-  %wide.load10.26 = load <4 x i32>, <4 x i32>* %182, align 4
-  %183 = mul nsw <4 x i32> %wide.load10.26, %wide.load.26
-  %184 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %183)
-  %185 = add i32 %184, %178
-  %186 = getelementptr inbounds i32, i32* %x, i32 108
-  %187 = bitcast i32* %186 to <4 x i32>*
-  %wide.load.27 = load <4 x i32>, <4 x i32>* %187, align 4
-  %188 = getelementptr inbounds i32, i32* %y, i32 108
-  %189 = bitcast i32* %188 to <4 x i32>*
-  %wide.load10.27 = load <4 x i32>, <4 x i32>* %189, align 4
-  %190 = mul nsw <4 x i32> %wide.load10.27, %wide.load.27
-  %191 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %190)
-  %192 = add i32 %191, %185
-  %193 = getelementptr inbounds i32, i32* %x, i32 112
-  %194 = bitcast i32* %193 to <4 x i32>*
-  %wide.load.28 = load <4 x i32>, <4 x i32>* %194, align 4
-  %195 = getelementptr inbounds i32, i32* %y, i32 112
-  %196 = bitcast i32* %195 to <4 x i32>*
-  %wide.load10.28 = load <4 x i32>, <4 x i32>* %196, align 4
-  %197 = mul nsw <4 x i32> %wide.load10.28, %wide.load.28
-  %198 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %197)
-  %199 = add i32 %198, %192
-  %200 = getelementptr inbounds i32, i32* %x, i32 116
-  %201 = bitcast i32* %200 to <4 x i32>*
-  %wide.load.29 = load <4 x i32>, <4 x i32>* %201, align 4
-  %202 = getelementptr inbounds i32, i32* %y, i32 116
-  %203 = bitcast i32* %202 to <4 x i32>*
-  %wide.load10.29 = load <4 x i32>, <4 x i32>* %203, align 4
-  %204 = mul nsw <4 x i32> %wide.load10.29, %wide.load.29
-  %205 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %204)
-  %206 = add i32 %205, %199
-  %207 = getelementptr inbounds i32, i32* %x, i32 120
-  %208 = bitcast i32* %207 to <4 x i32>*
-  %wide.load.30 = load <4 x i32>, <4 x i32>* %208, align 4
-  %209 = getelementptr inbounds i32, i32* %y, i32 120
-  %210 = bitcast i32* %209 to <4 x i32>*
-  %wide.load10.30 = load <4 x i32>, <4 x i32>* %210, align 4
-  %211 = mul nsw <4 x i32> %wide.load10.30, %wide.load.30
-  %212 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %211)
-  %213 = add i32 %212, %206
-  %214 = getelementptr inbounds i32, i32* %x, i32 124
-  %215 = bitcast i32* %214 to <4 x i32>*
-  %wide.load.31 = load <4 x i32>, <4 x i32>* %215, align 4
-  %216 = getelementptr inbounds i32, i32* %y, i32 124
-  %217 = bitcast i32* %216 to <4 x i32>*
-  %wide.load10.31 = load <4 x i32>, <4 x i32>* %217, align 4
-  %218 = mul nsw <4 x i32> %wide.load10.31, %wide.load.31
-  %219 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %218)
-  %220 = add i32 %219, %213
-  ret i32 %220
-}
-
-define i32 @mlav2i32i16(i16* %x, i16* %y) {
+  %136 = add i32 %135, %131
+  %137 = getelementptr inbounds i32, ptr %x, i32 112
+  %wide.load.28 = load <4 x i32>, ptr %137, align 4
+  %138 = getelementptr inbounds i32, ptr %y, i32 112
+  %wide.load10.28 = load <4 x i32>, ptr %138, align 4
+  %139 = mul nsw <4 x i32> %wide.load10.28, %wide.load.28
+  %140 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %139)
+  %141 = add i32 %140, %136
+  %142 = getelementptr inbounds i32, ptr %x, i32 116
+  %wide.load.29 = load <4 x i32>, ptr %142, align 4
+  %143 = getelementptr inbounds i32, ptr %y, i32 116
+  %wide.load10.29 = load <4 x i32>, ptr %143, align 4
+  %144 = mul nsw <4 x i32> %wide.load10.29, %wide.load.29
+  %145 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %144)
+  %146 = add i32 %145, %141
+  %147 = getelementptr inbounds i32, ptr %x, i32 120
+  %wide.load.30 = load <4 x i32>, ptr %147, align 4
+  %148 = getelementptr inbounds i32, ptr %y, i32 120
+  %wide.load10.30 = load <4 x i32>, ptr %148, align 4
+  %149 = mul nsw <4 x i32> %wide.load10.30, %wide.load.30
+  %150 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %149)
+  %151 = add i32 %150, %146
+  %152 = getelementptr inbounds i32, ptr %x, i32 124
+  %wide.load.31 = load <4 x i32>, ptr %152, align 4
+  %153 = getelementptr inbounds i32, ptr %y, i32 124
+  %wide.load10.31 = load <4 x i32>, ptr %153, align 4
+  %154 = mul nsw <4 x i32> %wide.load10.31, %wide.load.31
+  %155 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %154)
+  %156 = add i32 %155, %151
+  ret i32 %156
+}
+
+define i32 @mlav2i32i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav2i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrsh.w r2, [r0]
@@ -2192,23 +1963,23 @@ define i32 @mlav2i32i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    smlabb r0, r3, r2, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i16, i16* %x, align 2
+  %0 = load i16, ptr %x, align 2
   %conv = sext i16 %0 to i32
-  %1 = load i16, i16* %y, align 2
+  %1 = load i16, ptr %y, align 2
   %conv2 = sext i16 %1 to i32
   %mul = mul nsw i32 %conv2, %conv
-  %arrayidx.1 = getelementptr inbounds i16, i16* %x, i32 1
-  %2 = load i16, i16* %arrayidx.1, align 2
+  %arrayidx.1 = getelementptr inbounds i16, ptr %x, i32 1
+  %2 = load i16, ptr %arrayidx.1, align 2
   %conv.1 = sext i16 %2 to i32
-  %arrayidx1.1 = getelementptr inbounds i16, i16* %y, i32 1
-  %3 = load i16, i16* %arrayidx1.1, align 2
+  %arrayidx1.1 = getelementptr inbounds i16, ptr %y, i32 1
+  %3 = load i16, ptr %arrayidx1.1, align 2
   %conv2.1 = sext i16 %3 to i32
   %mul.1 = mul nsw i32 %conv2.1, %conv.1
   %add.1 = add nsw i32 %mul.1, %mul
   ret i32 %add.1
 }
 
-define i32 @mlav4i32i16(i16* %x, i16* %y) {
+define i32 @mlav4i32i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav4i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0]
@@ -2216,18 +1987,16 @@ define i32 @mlav4i32i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    vmlav.u32 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = sext <4 x i16> %1 to <4 x i32>
-  %3 = bitcast i16* %y to <4 x i16>*
-  %4 = load <4 x i16>, <4 x i16>* %3, align 2
-  %5 = sext <4 x i16> %4 to <4 x i32>
-  %6 = mul nsw <4 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %6)
-  ret i32 %7
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = sext <4 x i16> %0 to <4 x i32>
+  %2 = load <4 x i16>, ptr %y, align 2
+  %3 = sext <4 x i16> %2 to <4 x i32>
+  %4 = mul nsw <4 x i32> %3, %1
+  %5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %4)
+  ret i32 %5
 }
 
-define i32 @mlav8i32i16(i16* %x, i16* %y) {
+define i32 @mlav8i32i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav8i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -2235,18 +2004,16 @@ define i32 @mlav8i32i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    vmlav.s16 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = sext <8 x i16> %1 to <8 x i32>
-  %3 = bitcast i16* %y to <8 x i16>*
-  %4 = load <8 x i16>, <8 x i16>* %3, align 2
-  %5 = sext <8 x i16> %4 to <8 x i32>
-  %6 = mul nsw <8 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %6)
-  ret i32 %7
+  %0 = load <8 x i16>, ptr %x, align 2
+  %1 = sext <8 x i16> %0 to <8 x i32>
+  %2 = load <8 x i16>, ptr %y, align 2
+  %3 = sext <8 x i16> %2 to <8 x i32>
+  %4 = mul nsw <8 x i32> %3, %1
+  %5 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %4)
+  ret i32 %5
 }
 
-define i32 @mlav16i32i16(i16* %x, i16* %y) {
+define i32 @mlav16i32i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav16i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0]
@@ -2264,18 +2031,16 @@ define i32 @mlav16i32i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <16 x i16>*
-  %1 = load <16 x i16>, <16 x i16>* %0, align 2
-  %2 = sext <16 x i16> %1 to <16 x i32>
-  %3 = bitcast i16* %y to <16 x i16>*
-  %4 = load <16 x i16>, <16 x i16>* %3, align 2
-  %5 = sext <16 x i16> %4 to <16 x i32>
-  %6 = mul nsw <16 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %6)
-  ret i32 %7
+  %0 = load <16 x i16>, ptr %x, align 2
+  %1 = sext <16 x i16> %0 to <16 x i32>
+  %2 = load <16 x i16>, ptr %y, align 2
+  %3 = sext <16 x i16> %2 to <16 x i32>
+  %4 = mul nsw <16 x i32> %3, %1
+  %5 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %4)
+  ret i32 %5
 }
 
-define i32 @mlav24i32i16(i16* %x, i16* %y) {
+define i32 @mlav24i32i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav24i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -2296,29 +2061,25 @@ define i32 @mlav24i32i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    vmlava.u32 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = sext <8 x i16> %1 to <8 x i32>
-  %3 = bitcast i16* %y to <8 x i16>*
-  %4 = load <8 x i16>, <8 x i16>* %3, align 2
-  %5 = sext <8 x i16> %4 to <8 x i32>
-  %6 = mul nsw <8 x i32> %5, %2
-  %arrayidx.8 = getelementptr inbounds i16, i16* %x, i32 8
-  %arrayidx1.8 = getelementptr inbounds i16, i16* %y, i32 8
-  %7 = bitcast i16* %arrayidx.8 to <16 x i16>*
-  %8 = load <16 x i16>, <16 x i16>* %7, align 2
-  %9 = sext <16 x i16> %8 to <16 x i32>
-  %10 = bitcast i16* %arrayidx1.8 to <16 x i16>*
-  %11 = load <16 x i16>, <16 x i16>* %10, align 2
-  %12 = sext <16 x i16> %11 to <16 x i32>
-  %13 = mul nsw <16 x i32> %12, %9
-  %14 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %13)
-  %15 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %6)
-  %op.rdx = add nsw i32 %14, %15
+  %0 = load <8 x i16>, ptr %x, align 2
+  %1 = sext <8 x i16> %0 to <8 x i32>
+  %2 = load <8 x i16>, ptr %y, align 2
+  %3 = sext <8 x i16> %2 to <8 x i32>
+  %4 = mul nsw <8 x i32> %3, %1
+  %arrayidx.8 = getelementptr inbounds i16, ptr %x, i32 8
+  %arrayidx1.8 = getelementptr inbounds i16, ptr %y, i32 8
+  %5 = load <16 x i16>, ptr %arrayidx.8, align 2
+  %6 = sext <16 x i16> %5 to <16 x i32>
+  %7 = load <16 x i16>, ptr %arrayidx1.8, align 2
+  %8 = sext <16 x i16> %7 to <16 x i32>
+  %9 = mul nsw <16 x i32> %8, %6
+  %10 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %9)
+  %11 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %4)
+  %op.rdx = add nsw i32 %10, %11
   ret i32 %op.rdx
 }
 
-define i32 @mlav32i32i16(i16* %x, i16* %y) {
+define i32 @mlav32i32i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav32i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r0]
@@ -2348,18 +2109,16 @@ define i32 @mlav32i32i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    vmlava.u32 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <32 x i16>*
-  %1 = load <32 x i16>, <32 x i16>* %0, align 2
-  %2 = sext <32 x i16> %1 to <32 x i32>
-  %3 = bitcast i16* %y to <32 x i16>*
-  %4 = load <32 x i16>, <32 x i16>* %3, align 2
-  %5 = sext <32 x i16> %4 to <32 x i32>
-  %6 = mul nsw <32 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %6)
-  ret i32 %7
+  %0 = load <32 x i16>, ptr %x, align 2
+  %1 = sext <32 x i16> %0 to <32 x i32>
+  %2 = load <32 x i16>, ptr %y, align 2
+  %3 = sext <32 x i16> %2 to <32 x i32>
+  %4 = mul nsw <32 x i32> %3, %1
+  %5 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %4)
+  ret i32 %5
 }
 
-define i32 @mlav64i32i16(i16* %x, i16* %y) {
+define i32 @mlav64i32i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav64i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -2389,95 +2148,79 @@ define i32 @mlav64i32i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    vmlava.s16 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %0, align 2
-  %1 = sext <8 x i16> %wide.load to <8 x i32>
-  %2 = bitcast i16* %y to <8 x i16>*
-  %wide.load11 = load <8 x i16>, <8 x i16>* %2, align 2
-  %3 = sext <8 x i16> %wide.load11 to <8 x i32>
-  %4 = mul nsw <8 x i32> %3, %1
-  %5 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %4)
-  %6 = getelementptr inbounds i16, i16* %x, i32 8
-  %7 = bitcast i16* %6 to <8 x i16>*
-  %wide.load.1 = load <8 x i16>, <8 x i16>* %7, align 2
-  %8 = sext <8 x i16> %wide.load.1 to <8 x i32>
-  %9 = getelementptr inbounds i16, i16* %y, i32 8
-  %10 = bitcast i16* %9 to <8 x i16>*
-  %wide.load11.1 = load <8 x i16>, <8 x i16>* %10, align 2
-  %11 = sext <8 x i16> %wide.load11.1 to <8 x i32>
-  %12 = mul nsw <8 x i32> %11, %8
-  %13 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %12)
-  %14 = add i32 %13, %5
-  %15 = getelementptr inbounds i16, i16* %x, i32 16
-  %16 = bitcast i16* %15 to <8 x i16>*
-  %wide.load.2 = load <8 x i16>, <8 x i16>* %16, align 2
-  %17 = sext <8 x i16> %wide.load.2 to <8 x i32>
-  %18 = getelementptr inbounds i16, i16* %y, i32 16
-  %19 = bitcast i16* %18 to <8 x i16>*
-  %wide.load11.2 = load <8 x i16>, <8 x i16>* %19, align 2
-  %20 = sext <8 x i16> %wide.load11.2 to <8 x i32>
-  %21 = mul nsw <8 x i32> %20, %17
-  %22 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %21)
-  %23 = add i32 %22, %14
-  %24 = getelementptr inbounds i16, i16* %x, i32 24
-  %25 = bitcast i16* %24 to <8 x i16>*
-  %wide.load.3 = load <8 x i16>, <8 x i16>* %25, align 2
-  %26 = sext <8 x i16> %wide.load.3 to <8 x i32>
-  %27 = getelementptr inbounds i16, i16* %y, i32 24
-  %28 = bitcast i16* %27 to <8 x i16>*
-  %wide.load11.3 = load <8 x i16>, <8 x i16>* %28, align 2
-  %29 = sext <8 x i16> %wide.load11.3 to <8 x i32>
-  %30 = mul nsw <8 x i32> %29, %26
-  %31 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %30)
-  %32 = add i32 %31, %23
-  %33 = getelementptr inbounds i16, i16* %x, i32 32
-  %34 = bitcast i16* %33 to <8 x i16>*
-  %wide.load.4 = load <8 x i16>, <8 x i16>* %34, align 2
-  %35 = sext <8 x i16> %wide.load.4 to <8 x i32>
-  %36 = getelementptr inbounds i16, i16* %y, i32 32
-  %37 = bitcast i16* %36 to <8 x i16>*
-  %wide.load11.4 = load <8 x i16>, <8 x i16>* %37, align 2
-  %38 = sext <8 x i16> %wide.load11.4 to <8 x i32>
-  %39 = mul nsw <8 x i32> %38, %35
-  %40 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %39)
-  %41 = add i32 %40, %32
-  %42 = getelementptr inbounds i16, i16* %x, i32 40
-  %43 = bitcast i16* %42 to <8 x i16>*
-  %wide.load.5 = load <8 x i16>, <8 x i16>* %43, align 2
-  %44 = sext <8 x i16> %wide.load.5 to <8 x i32>
-  %45 = getelementptr inbounds i16, i16* %y, i32 40
-  %46 = bitcast i16* %45 to <8 x i16>*
-  %wide.load11.5 = load <8 x i16>, <8 x i16>* %46, align 2
-  %47 = sext <8 x i16> %wide.load11.5 to <8 x i32>
-  %48 = mul nsw <8 x i32> %47, %44
-  %49 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %48)
-  %50 = add i32 %49, %41
-  %51 = getelementptr inbounds i16, i16* %x, i32 48
-  %52 = bitcast i16* %51 to <8 x i16>*
-  %wide.load.6 = load <8 x i16>, <8 x i16>* %52, align 2
-  %53 = sext <8 x i16> %wide.load.6 to <8 x i32>
-  %54 = getelementptr inbounds i16, i16* %y, i32 48
-  %55 = bitcast i16* %54 to <8 x i16>*
-  %wide.load11.6 = load <8 x i16>, <8 x i16>* %55, align 2
-  %56 = sext <8 x i16> %wide.load11.6 to <8 x i32>
-  %57 = mul nsw <8 x i32> %56, %53
-  %58 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %57)
-  %59 = add i32 %58, %50
-  %60 = getelementptr inbounds i16, i16* %x, i32 56
-  %61 = bitcast i16* %60 to <8 x i16>*
-  %wide.load.7 = load <8 x i16>, <8 x i16>* %61, align 2
-  %62 = sext <8 x i16> %wide.load.7 to <8 x i32>
-  %63 = getelementptr inbounds i16, i16* %y, i32 56
-  %64 = bitcast i16* %63 to <8 x i16>*
-  %wide.load11.7 = load <8 x i16>, <8 x i16>* %64, align 2
-  %65 = sext <8 x i16> %wide.load11.7 to <8 x i32>
-  %66 = mul nsw <8 x i32> %65, %62
-  %67 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %66)
-  %68 = add i32 %67, %59
-  ret i32 %68
-}
-
-define i32 @mlav128i32i16(i16* %x, i16* %y) {
+  %wide.load = load <8 x i16>, ptr %x, align 2
+  %0 = sext <8 x i16> %wide.load to <8 x i32>
+  %wide.load11 = load <8 x i16>, ptr %y, align 2
+  %1 = sext <8 x i16> %wide.load11 to <8 x i32>
+  %2 = mul nsw <8 x i32> %1, %0
+  %3 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %2)
+  %4 = getelementptr inbounds i16, ptr %x, i32 8
+  %wide.load.1 = load <8 x i16>, ptr %4, align 2
+  %5 = sext <8 x i16> %wide.load.1 to <8 x i32>
+  %6 = getelementptr inbounds i16, ptr %y, i32 8
+  %wide.load11.1 = load <8 x i16>, ptr %6, align 2
+  %7 = sext <8 x i16> %wide.load11.1 to <8 x i32>
+  %8 = mul nsw <8 x i32> %7, %5
+  %9 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %8)
+  %10 = add i32 %9, %3
+  %11 = getelementptr inbounds i16, ptr %x, i32 16
+  %wide.load.2 = load <8 x i16>, ptr %11, align 2
+  %12 = sext <8 x i16> %wide.load.2 to <8 x i32>
+  %13 = getelementptr inbounds i16, ptr %y, i32 16
+  %wide.load11.2 = load <8 x i16>, ptr %13, align 2
+  %14 = sext <8 x i16> %wide.load11.2 to <8 x i32>
+  %15 = mul nsw <8 x i32> %14, %12
+  %16 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %15)
+  %17 = add i32 %16, %10
+  %18 = getelementptr inbounds i16, ptr %x, i32 24
+  %wide.load.3 = load <8 x i16>, ptr %18, align 2
+  %19 = sext <8 x i16> %wide.load.3 to <8 x i32>
+  %20 = getelementptr inbounds i16, ptr %y, i32 24
+  %wide.load11.3 = load <8 x i16>, ptr %20, align 2
+  %21 = sext <8 x i16> %wide.load11.3 to <8 x i32>
+  %22 = mul nsw <8 x i32> %21, %19
+  %23 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %22)
+  %24 = add i32 %23, %17
+  %25 = getelementptr inbounds i16, ptr %x, i32 32
+  %wide.load.4 = load <8 x i16>, ptr %25, align 2
+  %26 = sext <8 x i16> %wide.load.4 to <8 x i32>
+  %27 = getelementptr inbounds i16, ptr %y, i32 32
+  %wide.load11.4 = load <8 x i16>, ptr %27, align 2
+  %28 = sext <8 x i16> %wide.load11.4 to <8 x i32>
+  %29 = mul nsw <8 x i32> %28, %26
+  %30 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %29)
+  %31 = add i32 %30, %24
+  %32 = getelementptr inbounds i16, ptr %x, i32 40
+  %wide.load.5 = load <8 x i16>, ptr %32, align 2
+  %33 = sext <8 x i16> %wide.load.5 to <8 x i32>
+  %34 = getelementptr inbounds i16, ptr %y, i32 40
+  %wide.load11.5 = load <8 x i16>, ptr %34, align 2
+  %35 = sext <8 x i16> %wide.load11.5 to <8 x i32>
+  %36 = mul nsw <8 x i32> %35, %33
+  %37 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %36)
+  %38 = add i32 %37, %31
+  %39 = getelementptr inbounds i16, ptr %x, i32 48
+  %wide.load.6 = load <8 x i16>, ptr %39, align 2
+  %40 = sext <8 x i16> %wide.load.6 to <8 x i32>
+  %41 = getelementptr inbounds i16, ptr %y, i32 48
+  %wide.load11.6 = load <8 x i16>, ptr %41, align 2
+  %42 = sext <8 x i16> %wide.load11.6 to <8 x i32>
+  %43 = mul nsw <8 x i32> %42, %40
+  %44 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %43)
+  %45 = add i32 %44, %38
+  %46 = getelementptr inbounds i16, ptr %x, i32 56
+  %wide.load.7 = load <8 x i16>, ptr %46, align 2
+  %47 = sext <8 x i16> %wide.load.7 to <8 x i32>
+  %48 = getelementptr inbounds i16, ptr %y, i32 56
+  %wide.load11.7 = load <8 x i16>, ptr %48, align 2
+  %49 = sext <8 x i16> %wide.load11.7 to <8 x i32>
+  %50 = mul nsw <8 x i32> %49, %47
+  %51 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %50)
+  %52 = add i32 %51, %45
+  ret i32 %52
+}
+
+define i32 @mlav128i32i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav128i32i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -2531,183 +2274,151 @@ define i32 @mlav128i32i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    vmlava.s16 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %0, align 2
-  %1 = sext <8 x i16> %wide.load to <8 x i32>
-  %2 = bitcast i16* %y to <8 x i16>*
-  %wide.load11 = load <8 x i16>, <8 x i16>* %2, align 2
-  %3 = sext <8 x i16> %wide.load11 to <8 x i32>
-  %4 = mul nsw <8 x i32> %3, %1
-  %5 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %4)
-  %6 = getelementptr inbounds i16, i16* %x, i32 8
-  %7 = bitcast i16* %6 to <8 x i16>*
-  %wide.load.1 = load <8 x i16>, <8 x i16>* %7, align 2
-  %8 = sext <8 x i16> %wide.load.1 to <8 x i32>
-  %9 = getelementptr inbounds i16, i16* %y, i32 8
-  %10 = bitcast i16* %9 to <8 x i16>*
-  %wide.load11.1 = load <8 x i16>, <8 x i16>* %10, align 2
-  %11 = sext <8 x i16> %wide.load11.1 to <8 x i32>
-  %12 = mul nsw <8 x i32> %11, %8
-  %13 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %12)
-  %14 = add i32 %13, %5
-  %15 = getelementptr inbounds i16, i16* %x, i32 16
-  %16 = bitcast i16* %15 to <8 x i16>*
-  %wide.load.2 = load <8 x i16>, <8 x i16>* %16, align 2
-  %17 = sext <8 x i16> %wide.load.2 to <8 x i32>
-  %18 = getelementptr inbounds i16, i16* %y, i32 16
-  %19 = bitcast i16* %18 to <8 x i16>*
-  %wide.load11.2 = load <8 x i16>, <8 x i16>* %19, align 2
-  %20 = sext <8 x i16> %wide.load11.2 to <8 x i32>
-  %21 = mul nsw <8 x i32> %20, %17
-  %22 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %21)
-  %23 = add i32 %22, %14
-  %24 = getelementptr inbounds i16, i16* %x, i32 24
-  %25 = bitcast i16* %24 to <8 x i16>*
-  %wide.load.3 = load <8 x i16>, <8 x i16>* %25, align 2
-  %26 = sext <8 x i16> %wide.load.3 to <8 x i32>
-  %27 = getelementptr inbounds i16, i16* %y, i32 24
-  %28 = bitcast i16* %27 to <8 x i16>*
-  %wide.load11.3 = load <8 x i16>, <8 x i16>* %28, align 2
-  %29 = sext <8 x i16> %wide.load11.3 to <8 x i32>
-  %30 = mul nsw <8 x i32> %29, %26
-  %31 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %30)
-  %32 = add i32 %31, %23
-  %33 = getelementptr inbounds i16, i16* %x, i32 32
-  %34 = bitcast i16* %33 to <8 x i16>*
-  %wide.load.4 = load <8 x i16>, <8 x i16>* %34, align 2
-  %35 = sext <8 x i16> %wide.load.4 to <8 x i32>
-  %36 = getelementptr inbounds i16, i16* %y, i32 32
-  %37 = bitcast i16* %36 to <8 x i16>*
-  %wide.load11.4 = load <8 x i16>, <8 x i16>* %37, align 2
-  %38 = sext <8 x i16> %wide.load11.4 to <8 x i32>
-  %39 = mul nsw <8 x i32> %38, %35
-  %40 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %39)
-  %41 = add i32 %40, %32
-  %42 = getelementptr inbounds i16, i16* %x, i32 40
-  %43 = bitcast i16* %42 to <8 x i16>*
-  %wide.load.5 = load <8 x i16>, <8 x i16>* %43, align 2
-  %44 = sext <8 x i16> %wide.load.5 to <8 x i32>
-  %45 = getelementptr inbounds i16, i16* %y, i32 40
-  %46 = bitcast i16* %45 to <8 x i16>*
-  %wide.load11.5 = load <8 x i16>, <8 x i16>* %46, align 2
-  %47 = sext <8 x i16> %wide.load11.5 to <8 x i32>
-  %48 = mul nsw <8 x i32> %47, %44
-  %49 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %48)
-  %50 = add i32 %49, %41
-  %51 = getelementptr inbounds i16, i16* %x, i32 48
-  %52 = bitcast i16* %51 to <8 x i16>*
-  %wide.load.6 = load <8 x i16>, <8 x i16>* %52, align 2
-  %53 = sext <8 x i16> %wide.load.6 to <8 x i32>
-  %54 = getelementptr inbounds i16, i16* %y, i32 48
-  %55 = bitcast i16* %54 to <8 x i16>*
-  %wide.load11.6 = load <8 x i16>, <8 x i16>* %55, align 2
-  %56 = sext <8 x i16> %wide.load11.6 to <8 x i32>
-  %57 = mul nsw <8 x i32> %56, %53
+  %wide.load = load <8 x i16>, ptr %x, align 2
+  %0 = sext <8 x i16> %wide.load to <8 x i32>
+  %wide.load11 = load <8 x i16>, ptr %y, align 2
+  %1 = sext <8 x i16> %wide.load11 to <8 x i32>
+  %2 = mul nsw <8 x i32> %1, %0
+  %3 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %2)
+  %4 = getelementptr inbounds i16, ptr %x, i32 8
+  %wide.load.1 = load <8 x i16>, ptr %4, align 2
+  %5 = sext <8 x i16> %wide.load.1 to <8 x i32>
+  %6 = getelementptr inbounds i16, ptr %y, i32 8
+  %wide.load11.1 = load <8 x i16>, ptr %6, align 2
+  %7 = sext <8 x i16> %wide.load11.1 to <8 x i32>
+  %8 = mul nsw <8 x i32> %7, %5
+  %9 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %8)
+  %10 = add i32 %9, %3
+  %11 = getelementptr inbounds i16, ptr %x, i32 16
+  %wide.load.2 = load <8 x i16>, ptr %11, align 2
+  %12 = sext <8 x i16> %wide.load.2 to <8 x i32>
+  %13 = getelementptr inbounds i16, ptr %y, i32 16
+  %wide.load11.2 = load <8 x i16>, ptr %13, align 2
+  %14 = sext <8 x i16> %wide.load11.2 to <8 x i32>
+  %15 = mul nsw <8 x i32> %14, %12
+  %16 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %15)
+  %17 = add i32 %16, %10
+  %18 = getelementptr inbounds i16, ptr %x, i32 24
+  %wide.load.3 = load <8 x i16>, ptr %18, align 2
+  %19 = sext <8 x i16> %wide.load.3 to <8 x i32>
+  %20 = getelementptr inbounds i16, ptr %y, i32 24
+  %wide.load11.3 = load <8 x i16>, ptr %20, align 2
+  %21 = sext <8 x i16> %wide.load11.3 to <8 x i32>
+  %22 = mul nsw <8 x i32> %21, %19
+  %23 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %22)
+  %24 = add i32 %23, %17
+  %25 = getelementptr inbounds i16, ptr %x, i32 32
+  %wide.load.4 = load <8 x i16>, ptr %25, align 2
+  %26 = sext <8 x i16> %wide.load.4 to <8 x i32>
+  %27 = getelementptr inbounds i16, ptr %y, i32 32
+  %wide.load11.4 = load <8 x i16>, ptr %27, align 2
+  %28 = sext <8 x i16> %wide.load11.4 to <8 x i32>
+  %29 = mul nsw <8 x i32> %28, %26
+  %30 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %29)
+  %31 = add i32 %30, %24
+  %32 = getelementptr inbounds i16, ptr %x, i32 40
+  %wide.load.5 = load <8 x i16>, ptr %32, align 2
+  %33 = sext <8 x i16> %wide.load.5 to <8 x i32>
+  %34 = getelementptr inbounds i16, ptr %y, i32 40
+  %wide.load11.5 = load <8 x i16>, ptr %34, align 2
+  %35 = sext <8 x i16> %wide.load11.5 to <8 x i32>
+  %36 = mul nsw <8 x i32> %35, %33
+  %37 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %36)
+  %38 = add i32 %37, %31
+  %39 = getelementptr inbounds i16, ptr %x, i32 48
+  %wide.load.6 = load <8 x i16>, ptr %39, align 2
+  %40 = sext <8 x i16> %wide.load.6 to <8 x i32>
+  %41 = getelementptr inbounds i16, ptr %y, i32 48
+  %wide.load11.6 = load <8 x i16>, ptr %41, align 2
+  %42 = sext <8 x i16> %wide.load11.6 to <8 x i32>
+  %43 = mul nsw <8 x i32> %42, %40
+  %44 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %43)
+  %45 = add i32 %44, %38
+  %46 = getelementptr inbounds i16, ptr %x, i32 56
+  %wide.load.7 = load <8 x i16>, ptr %46, align 2
+  %47 = sext <8 x i16> %wide.load.7 to <8 x i32>
+  %48 = getelementptr inbounds i16, ptr %y, i32 56
+  %wide.load11.7 = load <8 x i16>, ptr %48, align 2
+  %49 = sext <8 x i16> %wide.load11.7 to <8 x i32>
+  %50 = mul nsw <8 x i32> %49, %47
+  %51 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %50)
+  %52 = add i32 %51, %45
+  %53 = getelementptr inbounds i16, ptr %x, i32 64
+  %wide.load.8 = load <8 x i16>, ptr %53, align 2
+  %54 = sext <8 x i16> %wide.load.8 to <8 x i32>
+  %55 = getelementptr inbounds i16, ptr %y, i32 64
+  %wide.load11.8 = load <8 x i16>, ptr %55, align 2
+  %56 = sext <8 x i16> %wide.load11.8 to <8 x i32>
+  %57 = mul nsw <8 x i32> %56, %54
   %58 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %57)
-  %59 = add i32 %58, %50
-  %60 = getelementptr inbounds i16, i16* %x, i32 56
-  %61 = bitcast i16* %60 to <8 x i16>*
-  %wide.load.7 = load <8 x i16>, <8 x i16>* %61, align 2
-  %62 = sext <8 x i16> %wide.load.7 to <8 x i32>
-  %63 = getelementptr inbounds i16, i16* %y, i32 56
-  %64 = bitcast i16* %63 to <8 x i16>*
-  %wide.load11.7 = load <8 x i16>, <8 x i16>* %64, align 2
-  %65 = sext <8 x i16> %wide.load11.7 to <8 x i32>
-  %66 = mul nsw <8 x i32> %65, %62
-  %67 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %66)
-  %68 = add i32 %67, %59
-  %69 = getelementptr inbounds i16, i16* %x, i32 64
-  %70 = bitcast i16* %69 to <8 x i16>*
-  %wide.load.8 = load <8 x i16>, <8 x i16>* %70, align 2
-  %71 = sext <8 x i16> %wide.load.8 to <8 x i32>
-  %72 = getelementptr inbounds i16, i16* %y, i32 64
-  %73 = bitcast i16* %72 to <8 x i16>*
-  %wide.load11.8 = load <8 x i16>, <8 x i16>* %73, align 2
-  %74 = sext <8 x i16> %wide.load11.8 to <8 x i32>
-  %75 = mul nsw <8 x i32> %74, %71
-  %76 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %75)
-  %77 = add i32 %76, %68
-  %78 = getelementptr inbounds i16, i16* %x, i32 72
-  %79 = bitcast i16* %78 to <8 x i16>*
-  %wide.load.9 = load <8 x i16>, <8 x i16>* %79, align 2
-  %80 = sext <8 x i16> %wide.load.9 to <8 x i32>
-  %81 = getelementptr inbounds i16, i16* %y, i32 72
-  %82 = bitcast i16* %81 to <8 x i16>*
-  %wide.load11.9 = load <8 x i16>, <8 x i16>* %82, align 2
-  %83 = sext <8 x i16> %wide.load11.9 to <8 x i32>
-  %84 = mul nsw <8 x i32> %83, %80
-  %85 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %84)
-  %86 = add i32 %85, %77
-  %87 = getelementptr inbounds i16, i16* %x, i32 80
-  %88 = bitcast i16* %87 to <8 x i16>*
-  %wide.load.10 = load <8 x i16>, <8 x i16>* %88, align 2
-  %89 = sext <8 x i16> %wide.load.10 to <8 x i32>
-  %90 = getelementptr inbounds i16, i16* %y, i32 80
-  %91 = bitcast i16* %90 to <8 x i16>*
-  %wide.load11.10 = load <8 x i16>, <8 x i16>* %91, align 2
-  %92 = sext <8 x i16> %wide.load11.10 to <8 x i32>
-  %93 = mul nsw <8 x i32> %92, %89
-  %94 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %93)
-  %95 = add i32 %94, %86
-  %96 = getelementptr inbounds i16, i16* %x, i32 88
-  %97 = bitcast i16* %96 to <8 x i16>*
-  %wide.load.11 = load <8 x i16>, <8 x i16>* %97, align 2
-  %98 = sext <8 x i16> %wide.load.11 to <8 x i32>
-  %99 = getelementptr inbounds i16, i16* %y, i32 88
-  %100 = bitcast i16* %99 to <8 x i16>*
-  %wide.load11.11 = load <8 x i16>, <8 x i16>* %100, align 2
-  %101 = sext <8 x i16> %wide.load11.11 to <8 x i32>
-  %102 = mul nsw <8 x i32> %101, %98
-  %103 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %102)
-  %104 = add i32 %103, %95
-  %105 = getelementptr inbounds i16, i16* %x, i32 96
-  %106 = bitcast i16* %105 to <8 x i16>*
-  %wide.load.12 = load <8 x i16>, <8 x i16>* %106, align 2
-  %107 = sext <8 x i16> %wide.load.12 to <8 x i32>
-  %108 = getelementptr inbounds i16, i16* %y, i32 96
-  %109 = bitcast i16* %108 to <8 x i16>*
-  %wide.load11.12 = load <8 x i16>, <8 x i16>* %109, align 2
-  %110 = sext <8 x i16> %wide.load11.12 to <8 x i32>
-  %111 = mul nsw <8 x i32> %110, %107
-  %112 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %111)
-  %113 = add i32 %112, %104
-  %114 = getelementptr inbounds i16, i16* %x, i32 104
-  %115 = bitcast i16* %114 to <8 x i16>*
-  %wide.load.13 = load <8 x i16>, <8 x i16>* %115, align 2
-  %116 = sext <8 x i16> %wide.load.13 to <8 x i32>
-  %117 = getelementptr inbounds i16, i16* %y, i32 104
-  %118 = bitcast i16* %117 to <8 x i16>*
-  %wide.load11.13 = load <8 x i16>, <8 x i16>* %118, align 2
-  %119 = sext <8 x i16> %wide.load11.13 to <8 x i32>
-  %120 = mul nsw <8 x i32> %119, %116
-  %121 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %120)
-  %122 = add i32 %121, %113
-  %123 = getelementptr inbounds i16, i16* %x, i32 112
-  %124 = bitcast i16* %123 to <8 x i16>*
-  %wide.load.14 = load <8 x i16>, <8 x i16>* %124, align 2
-  %125 = sext <8 x i16> %wide.load.14 to <8 x i32>
-  %126 = getelementptr inbounds i16, i16* %y, i32 112
-  %127 = bitcast i16* %126 to <8 x i16>*
-  %wide.load11.14 = load <8 x i16>, <8 x i16>* %127, align 2
-  %128 = sext <8 x i16> %wide.load11.14 to <8 x i32>
-  %129 = mul nsw <8 x i32> %128, %125
-  %130 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %129)
-  %131 = add i32 %130, %122
-  %132 = getelementptr inbounds i16, i16* %x, i32 120
-  %133 = bitcast i16* %132 to <8 x i16>*
-  %wide.load.15 = load <8 x i16>, <8 x i16>* %133, align 2
-  %134 = sext <8 x i16> %wide.load.15 to <8 x i32>
-  %135 = getelementptr inbounds i16, i16* %y, i32 120
-  %136 = bitcast i16* %135 to <8 x i16>*
-  %wide.load11.15 = load <8 x i16>, <8 x i16>* %136, align 2
-  %137 = sext <8 x i16> %wide.load11.15 to <8 x i32>
-  %138 = mul nsw <8 x i32> %137, %134
-  %139 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %138)
-  %140 = add i32 %139, %131
-  ret i32 %140
-}
-
-define i32 @mlav2i32i8(i8* %x, i8* %y) {
+  %59 = add i32 %58, %52
+  %60 = getelementptr inbounds i16, ptr %x, i32 72
+  %wide.load.9 = load <8 x i16>, ptr %60, align 2
+  %61 = sext <8 x i16> %wide.load.9 to <8 x i32>
+  %62 = getelementptr inbounds i16, ptr %y, i32 72
+  %wide.load11.9 = load <8 x i16>, ptr %62, align 2
+  %63 = sext <8 x i16> %wide.load11.9 to <8 x i32>
+  %64 = mul nsw <8 x i32> %63, %61
+  %65 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %64)
+  %66 = add i32 %65, %59
+  %67 = getelementptr inbounds i16, ptr %x, i32 80
+  %wide.load.10 = load <8 x i16>, ptr %67, align 2
+  %68 = sext <8 x i16> %wide.load.10 to <8 x i32>
+  %69 = getelementptr inbounds i16, ptr %y, i32 80
+  %wide.load11.10 = load <8 x i16>, ptr %69, align 2
+  %70 = sext <8 x i16> %wide.load11.10 to <8 x i32>
+  %71 = mul nsw <8 x i32> %70, %68
+  %72 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %71)
+  %73 = add i32 %72, %66
+  %74 = getelementptr inbounds i16, ptr %x, i32 88
+  %wide.load.11 = load <8 x i16>, ptr %74, align 2
+  %75 = sext <8 x i16> %wide.load.11 to <8 x i32>
+  %76 = getelementptr inbounds i16, ptr %y, i32 88
+  %wide.load11.11 = load <8 x i16>, ptr %76, align 2
+  %77 = sext <8 x i16> %wide.load11.11 to <8 x i32>
+  %78 = mul nsw <8 x i32> %77, %75
+  %79 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %78)
+  %80 = add i32 %79, %73
+  %81 = getelementptr inbounds i16, ptr %x, i32 96
+  %wide.load.12 = load <8 x i16>, ptr %81, align 2
+  %82 = sext <8 x i16> %wide.load.12 to <8 x i32>
+  %83 = getelementptr inbounds i16, ptr %y, i32 96
+  %wide.load11.12 = load <8 x i16>, ptr %83, align 2
+  %84 = sext <8 x i16> %wide.load11.12 to <8 x i32>
+  %85 = mul nsw <8 x i32> %84, %82
+  %86 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %85)
+  %87 = add i32 %86, %80
+  %88 = getelementptr inbounds i16, ptr %x, i32 104
+  %wide.load.13 = load <8 x i16>, ptr %88, align 2
+  %89 = sext <8 x i16> %wide.load.13 to <8 x i32>
+  %90 = getelementptr inbounds i16, ptr %y, i32 104
+  %wide.load11.13 = load <8 x i16>, ptr %90, align 2
+  %91 = sext <8 x i16> %wide.load11.13 to <8 x i32>
+  %92 = mul nsw <8 x i32> %91, %89
+  %93 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %92)
+  %94 = add i32 %93, %87
+  %95 = getelementptr inbounds i16, ptr %x, i32 112
+  %wide.load.14 = load <8 x i16>, ptr %95, align 2
+  %96 = sext <8 x i16> %wide.load.14 to <8 x i32>
+  %97 = getelementptr inbounds i16, ptr %y, i32 112
+  %wide.load11.14 = load <8 x i16>, ptr %97, align 2
+  %98 = sext <8 x i16> %wide.load11.14 to <8 x i32>
+  %99 = mul nsw <8 x i32> %98, %96
+  %100 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %99)
+  %101 = add i32 %100, %94
+  %102 = getelementptr inbounds i16, ptr %x, i32 120
+  %wide.load.15 = load <8 x i16>, ptr %102, align 2
+  %103 = sext <8 x i16> %wide.load.15 to <8 x i32>
+  %104 = getelementptr inbounds i16, ptr %y, i32 120
+  %wide.load11.15 = load <8 x i16>, ptr %104, align 2
+  %105 = sext <8 x i16> %wide.load11.15 to <8 x i32>
+  %106 = mul nsw <8 x i32> %105, %103
+  %107 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %106)
+  %108 = add i32 %107, %101
+  ret i32 %108
+}
+
+define i32 @mlav2i32i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav2i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrb r2, [r0]
@@ -2718,23 +2429,23 @@ define i32 @mlav2i32i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    smlabb r0, r3, r2, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i8, i8* %x, align 1
+  %0 = load i8, ptr %x, align 1
   %conv = zext i8 %0 to i32
-  %1 = load i8, i8* %y, align 1
+  %1 = load i8, ptr %y, align 1
   %conv2 = zext i8 %1 to i32
   %mul = mul nuw nsw i32 %conv2, %conv
-  %arrayidx.1 = getelementptr inbounds i8, i8* %x, i32 1
-  %2 = load i8, i8* %arrayidx.1, align 1
+  %arrayidx.1 = getelementptr inbounds i8, ptr %x, i32 1
+  %2 = load i8, ptr %arrayidx.1, align 1
   %conv.1 = zext i8 %2 to i32
-  %arrayidx1.1 = getelementptr inbounds i8, i8* %y, i32 1
-  %3 = load i8, i8* %arrayidx1.1, align 1
+  %arrayidx1.1 = getelementptr inbounds i8, ptr %y, i32 1
+  %3 = load i8, ptr %arrayidx1.1, align 1
   %conv2.1 = zext i8 %3 to i32
   %mul.1 = mul nuw nsw i32 %conv2.1, %conv.1
   %add.1 = add nuw nsw i32 %mul.1, %mul
   ret i32 %add.1
 }
 
-define i32 @mlav4i32i8(i8* %x, i8* %y) {
+define i32 @mlav4i32i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav4i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0]
@@ -2742,18 +2453,16 @@ define i32 @mlav4i32i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vmlav.u32 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = zext <4 x i8> %1 to <4 x i32>
-  %3 = bitcast i8* %y to <4 x i8>*
-  %4 = load <4 x i8>, <4 x i8>* %3, align 1
-  %5 = zext <4 x i8> %4 to <4 x i32>
-  %6 = mul nuw nsw <4 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %6)
-  ret i32 %7
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = zext <4 x i8> %0 to <4 x i32>
+  %2 = load <4 x i8>, ptr %y, align 1
+  %3 = zext <4 x i8> %2 to <4 x i32>
+  %4 = mul nuw nsw <4 x i32> %3, %1
+  %5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %4)
+  ret i32 %5
 }
 
-define i32 @mlav8i32i8(i8* %x, i8* %y) {
+define i32 @mlav8i32i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav8i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0]
@@ -2761,18 +2470,16 @@ define i32 @mlav8i32i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vmlav.u16 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i32>
-  %3 = bitcast i8* %y to <8 x i8>*
-  %4 = load <8 x i8>, <8 x i8>* %3, align 1
-  %5 = zext <8 x i8> %4 to <8 x i32>
-  %6 = mul nuw nsw <8 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %6)
-  ret i32 %7
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = zext <8 x i8> %0 to <8 x i32>
+  %2 = load <8 x i8>, ptr %y, align 1
+  %3 = zext <8 x i8> %2 to <8 x i32>
+  %4 = mul nuw nsw <8 x i32> %3, %1
+  %5 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %4)
+  ret i32 %5
 }
 
-define i32 @mlav16i32i8(i8* %x, i8* %y) {
+define i32 @mlav16i32i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav16i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -2780,18 +2487,16 @@ define i32 @mlav16i32i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vmlav.u8 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = zext <16 x i8> %1 to <16 x i32>
-  %3 = bitcast i8* %y to <16 x i8>*
-  %4 = load <16 x i8>, <16 x i8>* %3, align 1
-  %5 = zext <16 x i8> %4 to <16 x i32>
-  %6 = mul nuw nsw <16 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %6)
-  ret i32 %7
+  %0 = load <16 x i8>, ptr %x, align 1
+  %1 = zext <16 x i8> %0 to <16 x i32>
+  %2 = load <16 x i8>, ptr %y, align 1
+  %3 = zext <16 x i8> %2 to <16 x i32>
+  %4 = mul nuw nsw <16 x i32> %3, %1
+  %5 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %4)
+  ret i32 %5
 }
 
-define i32 @mlav24i32i8(i8* %x, i8* %y) {
+define i32 @mlav24i32i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav24i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0]
@@ -2803,29 +2508,25 @@ define i32 @mlav24i32i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i32>
-  %3 = bitcast i8* %y to <8 x i8>*
-  %4 = load <8 x i8>, <8 x i8>* %3, align 1
-  %5 = zext <8 x i8> %4 to <8 x i32>
-  %6 = mul nuw nsw <8 x i32> %5, %2
-  %arrayidx.8 = getelementptr inbounds i8, i8* %x, i32 8
-  %arrayidx1.8 = getelementptr inbounds i8, i8* %y, i32 8
-  %7 = bitcast i8* %arrayidx.8 to <16 x i8>*
-  %8 = load <16 x i8>, <16 x i8>* %7, align 1
-  %9 = zext <16 x i8> %8 to <16 x i32>
-  %10 = bitcast i8* %arrayidx1.8 to <16 x i8>*
-  %11 = load <16 x i8>, <16 x i8>* %10, align 1
-  %12 = zext <16 x i8> %11 to <16 x i32>
-  %13 = mul nuw nsw <16 x i32> %12, %9
-  %14 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %13)
-  %15 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %6)
-  %op.rdx = add nuw nsw i32 %14, %15
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = zext <8 x i8> %0 to <8 x i32>
+  %2 = load <8 x i8>, ptr %y, align 1
+  %3 = zext <8 x i8> %2 to <8 x i32>
+  %4 = mul nuw nsw <8 x i32> %3, %1
+  %arrayidx.8 = getelementptr inbounds i8, ptr %x, i32 8
+  %arrayidx1.8 = getelementptr inbounds i8, ptr %y, i32 8
+  %5 = load <16 x i8>, ptr %arrayidx.8, align 1
+  %6 = zext <16 x i8> %5 to <16 x i32>
+  %7 = load <16 x i8>, ptr %arrayidx1.8, align 1
+  %8 = zext <16 x i8> %7 to <16 x i32>
+  %9 = mul nuw nsw <16 x i32> %8, %6
+  %10 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %9)
+  %11 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %4)
+  %op.rdx = add nuw nsw i32 %10, %11
   ret i32 %op.rdx
 }
 
-define i32 @mlav32i32i8(i8* %x, i8* %y) {
+define i32 @mlav32i32i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav32i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0]
@@ -2855,18 +2556,16 @@ define i32 @mlav32i32i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vmlava.u32 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <32 x i8>*
-  %1 = load <32 x i8>, <32 x i8>* %0, align 1
-  %2 = zext <32 x i8> %1 to <32 x i32>
-  %3 = bitcast i8* %y to <32 x i8>*
-  %4 = load <32 x i8>, <32 x i8>* %3, align 1
-  %5 = zext <32 x i8> %4 to <32 x i32>
-  %6 = mul nuw nsw <32 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %6)
-  ret i32 %7
+  %0 = load <32 x i8>, ptr %x, align 1
+  %1 = zext <32 x i8> %0 to <32 x i32>
+  %2 = load <32 x i8>, ptr %y, align 1
+  %3 = zext <32 x i8> %2 to <32 x i32>
+  %4 = mul nuw nsw <32 x i32> %3, %1
+  %5 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %4)
+  ret i32 %5
 }
 
-define i32 @mlav64i32i8(i8* %x, i8* %y) {
+define i32 @mlav64i32i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav64i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -2884,51 +2583,43 @@ define i32 @mlav64i32i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %0, align 1
-  %1 = zext <16 x i8> %wide.load to <16 x i32>
-  %2 = bitcast i8* %y to <16 x i8>*
-  %wide.load11 = load <16 x i8>, <16 x i8>* %2, align 1
-  %3 = zext <16 x i8> %wide.load11 to <16 x i32>
-  %4 = mul nuw nsw <16 x i32> %3, %1
-  %5 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %4)
-  %6 = getelementptr inbounds i8, i8* %x, i32 16
-  %7 = bitcast i8* %6 to <16 x i8>*
-  %wide.load.1 = load <16 x i8>, <16 x i8>* %7, align 1
-  %8 = zext <16 x i8> %wide.load.1 to <16 x i32>
-  %9 = getelementptr inbounds i8, i8* %y, i32 16
-  %10 = bitcast i8* %9 to <16 x i8>*
-  %wide.load11.1 = load <16 x i8>, <16 x i8>* %10, align 1
-  %11 = zext <16 x i8> %wide.load11.1 to <16 x i32>
-  %12 = mul nuw nsw <16 x i32> %11, %8
-  %13 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %12)
-  %14 = add i32 %13, %5
-  %15 = getelementptr inbounds i8, i8* %x, i32 32
-  %16 = bitcast i8* %15 to <16 x i8>*
-  %wide.load.2 = load <16 x i8>, <16 x i8>* %16, align 1
-  %17 = zext <16 x i8> %wide.load.2 to <16 x i32>
-  %18 = getelementptr inbounds i8, i8* %y, i32 32
-  %19 = bitcast i8* %18 to <16 x i8>*
-  %wide.load11.2 = load <16 x i8>, <16 x i8>* %19, align 1
-  %20 = zext <16 x i8> %wide.load11.2 to <16 x i32>
-  %21 = mul nuw nsw <16 x i32> %20, %17
-  %22 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %21)
-  %23 = add i32 %22, %14
-  %24 = getelementptr inbounds i8, i8* %x, i32 48
-  %25 = bitcast i8* %24 to <16 x i8>*
-  %wide.load.3 = load <16 x i8>, <16 x i8>* %25, align 1
-  %26 = zext <16 x i8> %wide.load.3 to <16 x i32>
-  %27 = getelementptr inbounds i8, i8* %y, i32 48
-  %28 = bitcast i8* %27 to <16 x i8>*
-  %wide.load11.3 = load <16 x i8>, <16 x i8>* %28, align 1
-  %29 = zext <16 x i8> %wide.load11.3 to <16 x i32>
-  %30 = mul nuw nsw <16 x i32> %29, %26
-  %31 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %30)
-  %32 = add i32 %31, %23
-  ret i32 %32
-}
-
-define i32 @mlav128i32i8(i8* %x, i8* %y) {
+  %wide.load = load <16 x i8>, ptr %x, align 1
+  %0 = zext <16 x i8> %wide.load to <16 x i32>
+  %wide.load11 = load <16 x i8>, ptr %y, align 1
+  %1 = zext <16 x i8> %wide.load11 to <16 x i32>
+  %2 = mul nuw nsw <16 x i32> %1, %0
+  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
+  %4 = getelementptr inbounds i8, ptr %x, i32 16
+  %wide.load.1 = load <16 x i8>, ptr %4, align 1
+  %5 = zext <16 x i8> %wide.load.1 to <16 x i32>
+  %6 = getelementptr inbounds i8, ptr %y, i32 16
+  %wide.load11.1 = load <16 x i8>, ptr %6, align 1
+  %7 = zext <16 x i8> %wide.load11.1 to <16 x i32>
+  %8 = mul nuw nsw <16 x i32> %7, %5
+  %9 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %8)
+  %10 = add i32 %9, %3
+  %11 = getelementptr inbounds i8, ptr %x, i32 32
+  %wide.load.2 = load <16 x i8>, ptr %11, align 1
+  %12 = zext <16 x i8> %wide.load.2 to <16 x i32>
+  %13 = getelementptr inbounds i8, ptr %y, i32 32
+  %wide.load11.2 = load <16 x i8>, ptr %13, align 1
+  %14 = zext <16 x i8> %wide.load11.2 to <16 x i32>
+  %15 = mul nuw nsw <16 x i32> %14, %12
+  %16 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %15)
+  %17 = add i32 %16, %10
+  %18 = getelementptr inbounds i8, ptr %x, i32 48
+  %wide.load.3 = load <16 x i8>, ptr %18, align 1
+  %19 = zext <16 x i8> %wide.load.3 to <16 x i32>
+  %20 = getelementptr inbounds i8, ptr %y, i32 48
+  %wide.load11.3 = load <16 x i8>, ptr %20, align 1
+  %21 = zext <16 x i8> %wide.load11.3 to <16 x i32>
+  %22 = mul nuw nsw <16 x i32> %21, %19
+  %23 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %22)
+  %24 = add i32 %23, %17
+  ret i32 %24
+}
+
+define i32 @mlav128i32i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav128i32i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -2958,95 +2649,79 @@ define i32 @mlav128i32i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    vmlava.u8 r0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %0, align 1
-  %1 = zext <16 x i8> %wide.load to <16 x i32>
-  %2 = bitcast i8* %y to <16 x i8>*
-  %wide.load11 = load <16 x i8>, <16 x i8>* %2, align 1
-  %3 = zext <16 x i8> %wide.load11 to <16 x i32>
-  %4 = mul nuw nsw <16 x i32> %3, %1
-  %5 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %4)
-  %6 = getelementptr inbounds i8, i8* %x, i32 16
-  %7 = bitcast i8* %6 to <16 x i8>*
-  %wide.load.1 = load <16 x i8>, <16 x i8>* %7, align 1
-  %8 = zext <16 x i8> %wide.load.1 to <16 x i32>
-  %9 = getelementptr inbounds i8, i8* %y, i32 16
-  %10 = bitcast i8* %9 to <16 x i8>*
-  %wide.load11.1 = load <16 x i8>, <16 x i8>* %10, align 1
-  %11 = zext <16 x i8> %wide.load11.1 to <16 x i32>
-  %12 = mul nuw nsw <16 x i32> %11, %8
-  %13 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %12)
-  %14 = add i32 %13, %5
-  %15 = getelementptr inbounds i8, i8* %x, i32 32
-  %16 = bitcast i8* %15 to <16 x i8>*
-  %wide.load.2 = load <16 x i8>, <16 x i8>* %16, align 1
-  %17 = zext <16 x i8> %wide.load.2 to <16 x i32>
-  %18 = getelementptr inbounds i8, i8* %y, i32 32
-  %19 = bitcast i8* %18 to <16 x i8>*
-  %wide.load11.2 = load <16 x i8>, <16 x i8>* %19, align 1
-  %20 = zext <16 x i8> %wide.load11.2 to <16 x i32>
-  %21 = mul nuw nsw <16 x i32> %20, %17
-  %22 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %21)
-  %23 = add i32 %22, %14
-  %24 = getelementptr inbounds i8, i8* %x, i32 48
-  %25 = bitcast i8* %24 to <16 x i8>*
-  %wide.load.3 = load <16 x i8>, <16 x i8>* %25, align 1
-  %26 = zext <16 x i8> %wide.load.3 to <16 x i32>
-  %27 = getelementptr inbounds i8, i8* %y, i32 48
-  %28 = bitcast i8* %27 to <16 x i8>*
-  %wide.load11.3 = load <16 x i8>, <16 x i8>* %28, align 1
-  %29 = zext <16 x i8> %wide.load11.3 to <16 x i32>
-  %30 = mul nuw nsw <16 x i32> %29, %26
-  %31 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %30)
-  %32 = add i32 %31, %23
-  %33 = getelementptr inbounds i8, i8* %x, i32 64
-  %34 = bitcast i8* %33 to <16 x i8>*
-  %wide.load.4 = load <16 x i8>, <16 x i8>* %34, align 1
-  %35 = zext <16 x i8> %wide.load.4 to <16 x i32>
-  %36 = getelementptr inbounds i8, i8* %y, i32 64
-  %37 = bitcast i8* %36 to <16 x i8>*
-  %wide.load11.4 = load <16 x i8>, <16 x i8>* %37, align 1
-  %38 = zext <16 x i8> %wide.load11.4 to <16 x i32>
-  %39 = mul nuw nsw <16 x i32> %38, %35
-  %40 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %39)
-  %41 = add i32 %40, %32
-  %42 = getelementptr inbounds i8, i8* %x, i32 80
-  %43 = bitcast i8* %42 to <16 x i8>*
-  %wide.load.5 = load <16 x i8>, <16 x i8>* %43, align 1
-  %44 = zext <16 x i8> %wide.load.5 to <16 x i32>
-  %45 = getelementptr inbounds i8, i8* %y, i32 80
-  %46 = bitcast i8* %45 to <16 x i8>*
-  %wide.load11.5 = load <16 x i8>, <16 x i8>* %46, align 1
-  %47 = zext <16 x i8> %wide.load11.5 to <16 x i32>
-  %48 = mul nuw nsw <16 x i32> %47, %44
-  %49 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %48)
-  %50 = add i32 %49, %41
-  %51 = getelementptr inbounds i8, i8* %x, i32 96
-  %52 = bitcast i8* %51 to <16 x i8>*
-  %wide.load.6 = load <16 x i8>, <16 x i8>* %52, align 1
-  %53 = zext <16 x i8> %wide.load.6 to <16 x i32>
-  %54 = getelementptr inbounds i8, i8* %y, i32 96
-  %55 = bitcast i8* %54 to <16 x i8>*
-  %wide.load11.6 = load <16 x i8>, <16 x i8>* %55, align 1
-  %56 = zext <16 x i8> %wide.load11.6 to <16 x i32>
-  %57 = mul nuw nsw <16 x i32> %56, %53
-  %58 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %57)
-  %59 = add i32 %58, %50
-  %60 = getelementptr inbounds i8, i8* %x, i32 112
-  %61 = bitcast i8* %60 to <16 x i8>*
-  %wide.load.7 = load <16 x i8>, <16 x i8>* %61, align 1
-  %62 = zext <16 x i8> %wide.load.7 to <16 x i32>
-  %63 = getelementptr inbounds i8, i8* %y, i32 112
-  %64 = bitcast i8* %63 to <16 x i8>*
-  %wide.load11.7 = load <16 x i8>, <16 x i8>* %64, align 1
-  %65 = zext <16 x i8> %wide.load11.7 to <16 x i32>
-  %66 = mul nuw nsw <16 x i32> %65, %62
-  %67 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %66)
-  %68 = add i32 %67, %59
-  ret i32 %68
-}
-
-define signext i16 @mlav2i16i16(i16* %x, i16* %y) {
+  %wide.load = load <16 x i8>, ptr %x, align 1
+  %0 = zext <16 x i8> %wide.load to <16 x i32>
+  %wide.load11 = load <16 x i8>, ptr %y, align 1
+  %1 = zext <16 x i8> %wide.load11 to <16 x i32>
+  %2 = mul nuw nsw <16 x i32> %1, %0
+  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
+  %4 = getelementptr inbounds i8, ptr %x, i32 16
+  %wide.load.1 = load <16 x i8>, ptr %4, align 1
+  %5 = zext <16 x i8> %wide.load.1 to <16 x i32>
+  %6 = getelementptr inbounds i8, ptr %y, i32 16
+  %wide.load11.1 = load <16 x i8>, ptr %6, align 1
+  %7 = zext <16 x i8> %wide.load11.1 to <16 x i32>
+  %8 = mul nuw nsw <16 x i32> %7, %5
+  %9 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %8)
+  %10 = add i32 %9, %3
+  %11 = getelementptr inbounds i8, ptr %x, i32 32
+  %wide.load.2 = load <16 x i8>, ptr %11, align 1
+  %12 = zext <16 x i8> %wide.load.2 to <16 x i32>
+  %13 = getelementptr inbounds i8, ptr %y, i32 32
+  %wide.load11.2 = load <16 x i8>, ptr %13, align 1
+  %14 = zext <16 x i8> %wide.load11.2 to <16 x i32>
+  %15 = mul nuw nsw <16 x i32> %14, %12
+  %16 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %15)
+  %17 = add i32 %16, %10
+  %18 = getelementptr inbounds i8, ptr %x, i32 48
+  %wide.load.3 = load <16 x i8>, ptr %18, align 1
+  %19 = zext <16 x i8> %wide.load.3 to <16 x i32>
+  %20 = getelementptr inbounds i8, ptr %y, i32 48
+  %wide.load11.3 = load <16 x i8>, ptr %20, align 1
+  %21 = zext <16 x i8> %wide.load11.3 to <16 x i32>
+  %22 = mul nuw nsw <16 x i32> %21, %19
+  %23 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %22)
+  %24 = add i32 %23, %17
+  %25 = getelementptr inbounds i8, ptr %x, i32 64
+  %wide.load.4 = load <16 x i8>, ptr %25, align 1
+  %26 = zext <16 x i8> %wide.load.4 to <16 x i32>
+  %27 = getelementptr inbounds i8, ptr %y, i32 64
+  %wide.load11.4 = load <16 x i8>, ptr %27, align 1
+  %28 = zext <16 x i8> %wide.load11.4 to <16 x i32>
+  %29 = mul nuw nsw <16 x i32> %28, %26
+  %30 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %29)
+  %31 = add i32 %30, %24
+  %32 = getelementptr inbounds i8, ptr %x, i32 80
+  %wide.load.5 = load <16 x i8>, ptr %32, align 1
+  %33 = zext <16 x i8> %wide.load.5 to <16 x i32>
+  %34 = getelementptr inbounds i8, ptr %y, i32 80
+  %wide.load11.5 = load <16 x i8>, ptr %34, align 1
+  %35 = zext <16 x i8> %wide.load11.5 to <16 x i32>
+  %36 = mul nuw nsw <16 x i32> %35, %33
+  %37 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %36)
+  %38 = add i32 %37, %31
+  %39 = getelementptr inbounds i8, ptr %x, i32 96
+  %wide.load.6 = load <16 x i8>, ptr %39, align 1
+  %40 = zext <16 x i8> %wide.load.6 to <16 x i32>
+  %41 = getelementptr inbounds i8, ptr %y, i32 96
+  %wide.load11.6 = load <16 x i8>, ptr %41, align 1
+  %42 = zext <16 x i8> %wide.load11.6 to <16 x i32>
+  %43 = mul nuw nsw <16 x i32> %42, %40
+  %44 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %43)
+  %45 = add i32 %44, %38
+  %46 = getelementptr inbounds i8, ptr %x, i32 112
+  %wide.load.7 = load <16 x i8>, ptr %46, align 1
+  %47 = zext <16 x i8> %wide.load.7 to <16 x i32>
+  %48 = getelementptr inbounds i8, ptr %y, i32 112
+  %wide.load11.7 = load <16 x i8>, ptr %48, align 1
+  %49 = zext <16 x i8> %wide.load11.7 to <16 x i32>
+  %50 = mul nuw nsw <16 x i32> %49, %47
+  %51 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %50)
+  %52 = add i32 %51, %45
+  ret i32 %52
+}
+
+define signext i16 @mlav2i16i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav2i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrh r2, [r0]
@@ -3058,19 +2733,19 @@ define signext i16 @mlav2i16i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    sxth r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i16, i16* %x, align 2
-  %1 = load i16, i16* %y, align 2
+  %0 = load i16, ptr %x, align 2
+  %1 = load i16, ptr %y, align 2
   %mul = mul i16 %1, %0
-  %arrayidx.1 = getelementptr inbounds i16, i16* %x, i32 1
-  %2 = load i16, i16* %arrayidx.1, align 2
-  %arrayidx1.1 = getelementptr inbounds i16, i16* %y, i32 1
-  %3 = load i16, i16* %arrayidx1.1, align 2
+  %arrayidx.1 = getelementptr inbounds i16, ptr %x, i32 1
+  %2 = load i16, ptr %arrayidx.1, align 2
+  %arrayidx1.1 = getelementptr inbounds i16, ptr %y, i32 1
+  %3 = load i16, ptr %arrayidx1.1, align 2
   %mul.1 = mul i16 %3, %2
   %add.1 = add i16 %mul.1, %mul
   ret i16 %add.1
 }
 
-define signext i16 @mlav4i16i16(i16* %x, i16* %y) {
+define signext i16 @mlav4i16i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav4i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0]
@@ -3079,16 +2754,14 @@ define signext i16 @mlav4i16i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    sxth r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <4 x i16>*
-  %1 = load <4 x i16>, <4 x i16>* %0, align 2
-  %2 = bitcast i16* %y to <4 x i16>*
-  %3 = load <4 x i16>, <4 x i16>* %2, align 2
-  %4 = mul <4 x i16> %3, %1
-  %5 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %4)
-  ret i16 %5
+  %0 = load <4 x i16>, ptr %x, align 2
+  %1 = load <4 x i16>, ptr %y, align 2
+  %2 = mul <4 x i16> %1, %0
+  %3 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %2)
+  ret i16 %3
 }
 
-define signext i16 @mlav8i16i16(i16* %x, i16* %y) {
+define signext i16 @mlav8i16i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav8i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -3097,16 +2770,14 @@ define signext i16 @mlav8i16i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    sxth r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i16* %y to <8 x i16>*
-  %3 = load <8 x i16>, <8 x i16>* %2, align 2
-  %4 = mul <8 x i16> %3, %1
-  %5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %4)
-  ret i16 %5
+  %0 = load <8 x i16>, ptr %x, align 2
+  %1 = load <8 x i16>, ptr %y, align 2
+  %2 = mul <8 x i16> %1, %0
+  %3 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %2)
+  ret i16 %3
 }
 
-define signext i16 @mlav16i16i16(i16* %x, i16* %y) {
+define signext i16 @mlav16i16i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav16i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -3118,16 +2789,14 @@ define signext i16 @mlav16i16i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    sxth r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <16 x i16>*
-  %1 = load <16 x i16>, <16 x i16>* %0, align 2
-  %2 = bitcast i16* %y to <16 x i16>*
-  %3 = load <16 x i16>, <16 x i16>* %2, align 2
-  %4 = mul <16 x i16> %3, %1
-  %5 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %4)
-  ret i16 %5
+  %0 = load <16 x i16>, ptr %x, align 2
+  %1 = load <16 x i16>, ptr %y, align 2
+  %2 = mul <16 x i16> %1, %0
+  %3 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %2)
+  ret i16 %3
 }
 
-define signext i16 @mlav24i16i16(i16* %x, i16* %y) {
+define signext i16 @mlav24i16i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav24i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -3142,25 +2811,21 @@ define signext i16 @mlav24i16i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    sxth r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 2
-  %2 = bitcast i16* %y to <8 x i16>*
-  %3 = load <8 x i16>, <8 x i16>* %2, align 2
-  %4 = mul <8 x i16> %3, %1
-  %arrayidx.8 = getelementptr inbounds i16, i16* %x, i32 8
-  %arrayidx1.8 = getelementptr inbounds i16, i16* %y, i32 8
-  %5 = bitcast i16* %arrayidx.8 to <16 x i16>*
-  %6 = load <16 x i16>, <16 x i16>* %5, align 2
-  %7 = bitcast i16* %arrayidx1.8 to <16 x i16>*
-  %8 = load <16 x i16>, <16 x i16>* %7, align 2
-  %9 = mul <16 x i16> %8, %6
-  %10 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %9)
-  %11 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %4)
-  %op.rdx = add i16 %10, %11
+  %0 = load <8 x i16>, ptr %x, align 2
+  %1 = load <8 x i16>, ptr %y, align 2
+  %2 = mul <8 x i16> %1, %0
+  %arrayidx.8 = getelementptr inbounds i16, ptr %x, i32 8
+  %arrayidx1.8 = getelementptr inbounds i16, ptr %y, i32 8
+  %3 = load <16 x i16>, ptr %arrayidx.8, align 2
+  %4 = load <16 x i16>, ptr %arrayidx1.8, align 2
+  %5 = mul <16 x i16> %4, %3
+  %6 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %5)
+  %7 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %2)
+  %op.rdx = add i16 %6, %7
   ret i16 %op.rdx
 }
 
-define signext i16 @mlav32i16i16(i16* %x, i16* %y) {
+define signext i16 @mlav32i16i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav32i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -3178,16 +2843,14 @@ define signext i16 @mlav32i16i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    sxth r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <32 x i16>*
-  %1 = load <32 x i16>, <32 x i16>* %0, align 2
-  %2 = bitcast i16* %y to <32 x i16>*
-  %3 = load <32 x i16>, <32 x i16>* %2, align 2
-  %4 = mul <32 x i16> %3, %1
-  %5 = call i16 @llvm.vector.reduce.add.v32i16(<32 x i16> %4)
-  ret i16 %5
+  %0 = load <32 x i16>, ptr %x, align 2
+  %1 = load <32 x i16>, ptr %y, align 2
+  %2 = mul <32 x i16> %1, %0
+  %3 = call i16 @llvm.vector.reduce.add.v32i16(<32 x i16> %2)
+  ret i16 %3
 }
 
-define signext i16 @mlav64i16i16(i16* %x, i16* %y) {
+define signext i16 @mlav64i16i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav64i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -3217,79 +2880,63 @@ define signext i16 @mlav64i16i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    sxth r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %0, align 2
-  %1 = bitcast i16* %y to <8 x i16>*
-  %wide.load13 = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = mul <8 x i16> %wide.load13, %wide.load
-  %3 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %2)
-  %4 = getelementptr inbounds i16, i16* %x, i32 8
-  %5 = bitcast i16* %4 to <8 x i16>*
-  %wide.load.1 = load <8 x i16>, <8 x i16>* %5, align 2
-  %6 = getelementptr inbounds i16, i16* %y, i32 8
-  %7 = bitcast i16* %6 to <8 x i16>*
-  %wide.load13.1 = load <8 x i16>, <8 x i16>* %7, align 2
-  %8 = mul <8 x i16> %wide.load13.1, %wide.load.1
-  %9 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %8)
-  %10 = add i16 %9, %3
-  %11 = getelementptr inbounds i16, i16* %x, i32 16
-  %12 = bitcast i16* %11 to <8 x i16>*
-  %wide.load.2 = load <8 x i16>, <8 x i16>* %12, align 2
-  %13 = getelementptr inbounds i16, i16* %y, i32 16
-  %14 = bitcast i16* %13 to <8 x i16>*
-  %wide.load13.2 = load <8 x i16>, <8 x i16>* %14, align 2
-  %15 = mul <8 x i16> %wide.load13.2, %wide.load.2
-  %16 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %15)
-  %17 = add i16 %16, %10
-  %18 = getelementptr inbounds i16, i16* %x, i32 24
-  %19 = bitcast i16* %18 to <8 x i16>*
-  %wide.load.3 = load <8 x i16>, <8 x i16>* %19, align 2
-  %20 = getelementptr inbounds i16, i16* %y, i32 24
-  %21 = bitcast i16* %20 to <8 x i16>*
-  %wide.load13.3 = load <8 x i16>, <8 x i16>* %21, align 2
-  %22 = mul <8 x i16> %wide.load13.3, %wide.load.3
-  %23 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %22)
-  %24 = add i16 %23, %17
-  %25 = getelementptr inbounds i16, i16* %x, i32 32
-  %26 = bitcast i16* %25 to <8 x i16>*
-  %wide.load.4 = load <8 x i16>, <8 x i16>* %26, align 2
-  %27 = getelementptr inbounds i16, i16* %y, i32 32
-  %28 = bitcast i16* %27 to <8 x i16>*
-  %wide.load13.4 = load <8 x i16>, <8 x i16>* %28, align 2
-  %29 = mul <8 x i16> %wide.load13.4, %wide.load.4
+  %wide.load = load <8 x i16>, ptr %x, align 2
+  %wide.load13 = load <8 x i16>, ptr %y, align 2
+  %0 = mul <8 x i16> %wide.load13, %wide.load
+  %1 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %0)
+  %2 = getelementptr inbounds i16, ptr %x, i32 8
+  %wide.load.1 = load <8 x i16>, ptr %2, align 2
+  %3 = getelementptr inbounds i16, ptr %y, i32 8
+  %wide.load13.1 = load <8 x i16>, ptr %3, align 2
+  %4 = mul <8 x i16> %wide.load13.1, %wide.load.1
+  %5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %4)
+  %6 = add i16 %5, %1
+  %7 = getelementptr inbounds i16, ptr %x, i32 16
+  %wide.load.2 = load <8 x i16>, ptr %7, align 2
+  %8 = getelementptr inbounds i16, ptr %y, i32 16
+  %wide.load13.2 = load <8 x i16>, ptr %8, align 2
+  %9 = mul <8 x i16> %wide.load13.2, %wide.load.2
+  %10 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %9)
+  %11 = add i16 %10, %6
+  %12 = getelementptr inbounds i16, ptr %x, i32 24
+  %wide.load.3 = load <8 x i16>, ptr %12, align 2
+  %13 = getelementptr inbounds i16, ptr %y, i32 24
+  %wide.load13.3 = load <8 x i16>, ptr %13, align 2
+  %14 = mul <8 x i16> %wide.load13.3, %wide.load.3
+  %15 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %14)
+  %16 = add i16 %15, %11
+  %17 = getelementptr inbounds i16, ptr %x, i32 32
+  %wide.load.4 = load <8 x i16>, ptr %17, align 2
+  %18 = getelementptr inbounds i16, ptr %y, i32 32
+  %wide.load13.4 = load <8 x i16>, ptr %18, align 2
+  %19 = mul <8 x i16> %wide.load13.4, %wide.load.4
+  %20 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %19)
+  %21 = add i16 %20, %16
+  %22 = getelementptr inbounds i16, ptr %x, i32 40
+  %wide.load.5 = load <8 x i16>, ptr %22, align 2
+  %23 = getelementptr inbounds i16, ptr %y, i32 40
+  %wide.load13.5 = load <8 x i16>, ptr %23, align 2
+  %24 = mul <8 x i16> %wide.load13.5, %wide.load.5
+  %25 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %24)
+  %26 = add i16 %25, %21
+  %27 = getelementptr inbounds i16, ptr %x, i32 48
+  %wide.load.6 = load <8 x i16>, ptr %27, align 2
+  %28 = getelementptr inbounds i16, ptr %y, i32 48
+  %wide.load13.6 = load <8 x i16>, ptr %28, align 2
+  %29 = mul <8 x i16> %wide.load13.6, %wide.load.6
   %30 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %29)
-  %31 = add i16 %30, %24
-  %32 = getelementptr inbounds i16, i16* %x, i32 40
-  %33 = bitcast i16* %32 to <8 x i16>*
-  %wide.load.5 = load <8 x i16>, <8 x i16>* %33, align 2
-  %34 = getelementptr inbounds i16, i16* %y, i32 40
-  %35 = bitcast i16* %34 to <8 x i16>*
-  %wide.load13.5 = load <8 x i16>, <8 x i16>* %35, align 2
-  %36 = mul <8 x i16> %wide.load13.5, %wide.load.5
-  %37 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %36)
-  %38 = add i16 %37, %31
-  %39 = getelementptr inbounds i16, i16* %x, i32 48
-  %40 = bitcast i16* %39 to <8 x i16>*
-  %wide.load.6 = load <8 x i16>, <8 x i16>* %40, align 2
-  %41 = getelementptr inbounds i16, i16* %y, i32 48
-  %42 = bitcast i16* %41 to <8 x i16>*
-  %wide.load13.6 = load <8 x i16>, <8 x i16>* %42, align 2
-  %43 = mul <8 x i16> %wide.load13.6, %wide.load.6
-  %44 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %43)
-  %45 = add i16 %44, %38
-  %46 = getelementptr inbounds i16, i16* %x, i32 56
-  %47 = bitcast i16* %46 to <8 x i16>*
-  %wide.load.7 = load <8 x i16>, <8 x i16>* %47, align 2
-  %48 = getelementptr inbounds i16, i16* %y, i32 56
-  %49 = bitcast i16* %48 to <8 x i16>*
-  %wide.load13.7 = load <8 x i16>, <8 x i16>* %49, align 2
-  %50 = mul <8 x i16> %wide.load13.7, %wide.load.7
-  %51 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %50)
-  %52 = add i16 %51, %45
-  ret i16 %52
-}
-
-define signext i16 @mlav128i16i16(i16* %x, i16* %y) {
+  %31 = add i16 %30, %26
+  %32 = getelementptr inbounds i16, ptr %x, i32 56
+  %wide.load.7 = load <8 x i16>, ptr %32, align 2
+  %33 = getelementptr inbounds i16, ptr %y, i32 56
+  %wide.load13.7 = load <8 x i16>, ptr %33, align 2
+  %34 = mul <8 x i16> %wide.load13.7, %wide.load.7
+  %35 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %34)
+  %36 = add i16 %35, %31
+  ret i16 %36
+}
+
+define signext i16 @mlav128i16i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav128i16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -3343,151 +2990,119 @@ define signext i16 @mlav128i16i16(i16* %x, i16* %y) {
 ; CHECK-NEXT:    sxth r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i16* %x to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %0, align 2
-  %1 = bitcast i16* %y to <8 x i16>*
-  %wide.load13 = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = mul <8 x i16> %wide.load13, %wide.load
-  %3 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %2)
-  %4 = getelementptr inbounds i16, i16* %x, i32 8
-  %5 = bitcast i16* %4 to <8 x i16>*
-  %wide.load.1 = load <8 x i16>, <8 x i16>* %5, align 2
-  %6 = getelementptr inbounds i16, i16* %y, i32 8
-  %7 = bitcast i16* %6 to <8 x i16>*
-  %wide.load13.1 = load <8 x i16>, <8 x i16>* %7, align 2
-  %8 = mul <8 x i16> %wide.load13.1, %wide.load.1
-  %9 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %8)
-  %10 = add i16 %9, %3
-  %11 = getelementptr inbounds i16, i16* %x, i32 16
-  %12 = bitcast i16* %11 to <8 x i16>*
-  %wide.load.2 = load <8 x i16>, <8 x i16>* %12, align 2
-  %13 = getelementptr inbounds i16, i16* %y, i32 16
-  %14 = bitcast i16* %13 to <8 x i16>*
-  %wide.load13.2 = load <8 x i16>, <8 x i16>* %14, align 2
-  %15 = mul <8 x i16> %wide.load13.2, %wide.load.2
-  %16 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %15)
-  %17 = add i16 %16, %10
-  %18 = getelementptr inbounds i16, i16* %x, i32 24
-  %19 = bitcast i16* %18 to <8 x i16>*
-  %wide.load.3 = load <8 x i16>, <8 x i16>* %19, align 2
-  %20 = getelementptr inbounds i16, i16* %y, i32 24
-  %21 = bitcast i16* %20 to <8 x i16>*
-  %wide.load13.3 = load <8 x i16>, <8 x i16>* %21, align 2
-  %22 = mul <8 x i16> %wide.load13.3, %wide.load.3
-  %23 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %22)
-  %24 = add i16 %23, %17
-  %25 = getelementptr inbounds i16, i16* %x, i32 32
-  %26 = bitcast i16* %25 to <8 x i16>*
-  %wide.load.4 = load <8 x i16>, <8 x i16>* %26, align 2
-  %27 = getelementptr inbounds i16, i16* %y, i32 32
-  %28 = bitcast i16* %27 to <8 x i16>*
-  %wide.load13.4 = load <8 x i16>, <8 x i16>* %28, align 2
-  %29 = mul <8 x i16> %wide.load13.4, %wide.load.4
+  %wide.load = load <8 x i16>, ptr %x, align 2
+  %wide.load13 = load <8 x i16>, ptr %y, align 2
+  %0 = mul <8 x i16> %wide.load13, %wide.load
+  %1 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %0)
+  %2 = getelementptr inbounds i16, ptr %x, i32 8
+  %wide.load.1 = load <8 x i16>, ptr %2, align 2
+  %3 = getelementptr inbounds i16, ptr %y, i32 8
+  %wide.load13.1 = load <8 x i16>, ptr %3, align 2
+  %4 = mul <8 x i16> %wide.load13.1, %wide.load.1
+  %5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %4)
+  %6 = add i16 %5, %1
+  %7 = getelementptr inbounds i16, ptr %x, i32 16
+  %wide.load.2 = load <8 x i16>, ptr %7, align 2
+  %8 = getelementptr inbounds i16, ptr %y, i32 16
+  %wide.load13.2 = load <8 x i16>, ptr %8, align 2
+  %9 = mul <8 x i16> %wide.load13.2, %wide.load.2
+  %10 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %9)
+  %11 = add i16 %10, %6
+  %12 = getelementptr inbounds i16, ptr %x, i32 24
+  %wide.load.3 = load <8 x i16>, ptr %12, align 2
+  %13 = getelementptr inbounds i16, ptr %y, i32 24
+  %wide.load13.3 = load <8 x i16>, ptr %13, align 2
+  %14 = mul <8 x i16> %wide.load13.3, %wide.load.3
+  %15 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %14)
+  %16 = add i16 %15, %11
+  %17 = getelementptr inbounds i16, ptr %x, i32 32
+  %wide.load.4 = load <8 x i16>, ptr %17, align 2
+  %18 = getelementptr inbounds i16, ptr %y, i32 32
+  %wide.load13.4 = load <8 x i16>, ptr %18, align 2
+  %19 = mul <8 x i16> %wide.load13.4, %wide.load.4
+  %20 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %19)
+  %21 = add i16 %20, %16
+  %22 = getelementptr inbounds i16, ptr %x, i32 40
+  %wide.load.5 = load <8 x i16>, ptr %22, align 2
+  %23 = getelementptr inbounds i16, ptr %y, i32 40
+  %wide.load13.5 = load <8 x i16>, ptr %23, align 2
+  %24 = mul <8 x i16> %wide.load13.5, %wide.load.5
+  %25 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %24)
+  %26 = add i16 %25, %21
+  %27 = getelementptr inbounds i16, ptr %x, i32 48
+  %wide.load.6 = load <8 x i16>, ptr %27, align 2
+  %28 = getelementptr inbounds i16, ptr %y, i32 48
+  %wide.load13.6 = load <8 x i16>, ptr %28, align 2
+  %29 = mul <8 x i16> %wide.load13.6, %wide.load.6
   %30 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %29)
-  %31 = add i16 %30, %24
-  %32 = getelementptr inbounds i16, i16* %x, i32 40
-  %33 = bitcast i16* %32 to <8 x i16>*
-  %wide.load.5 = load <8 x i16>, <8 x i16>* %33, align 2
-  %34 = getelementptr inbounds i16, i16* %y, i32 40
-  %35 = bitcast i16* %34 to <8 x i16>*
-  %wide.load13.5 = load <8 x i16>, <8 x i16>* %35, align 2
-  %36 = mul <8 x i16> %wide.load13.5, %wide.load.5
-  %37 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %36)
-  %38 = add i16 %37, %31
-  %39 = getelementptr inbounds i16, i16* %x, i32 48
-  %40 = bitcast i16* %39 to <8 x i16>*
-  %wide.load.6 = load <8 x i16>, <8 x i16>* %40, align 2
-  %41 = getelementptr inbounds i16, i16* %y, i32 48
-  %42 = bitcast i16* %41 to <8 x i16>*
-  %wide.load13.6 = load <8 x i16>, <8 x i16>* %42, align 2
-  %43 = mul <8 x i16> %wide.load13.6, %wide.load.6
-  %44 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %43)
-  %45 = add i16 %44, %38
-  %46 = getelementptr inbounds i16, i16* %x, i32 56
-  %47 = bitcast i16* %46 to <8 x i16>*
-  %wide.load.7 = load <8 x i16>, <8 x i16>* %47, align 2
-  %48 = getelementptr inbounds i16, i16* %y, i32 56
-  %49 = bitcast i16* %48 to <8 x i16>*
-  %wide.load13.7 = load <8 x i16>, <8 x i16>* %49, align 2
-  %50 = mul <8 x i16> %wide.load13.7, %wide.load.7
-  %51 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %50)
-  %52 = add i16 %51, %45
-  %53 = getelementptr inbounds i16, i16* %x, i32 64
-  %54 = bitcast i16* %53 to <8 x i16>*
-  %wide.load.8 = load <8 x i16>, <8 x i16>* %54, align 2
-  %55 = getelementptr inbounds i16, i16* %y, i32 64
-  %56 = bitcast i16* %55 to <8 x i16>*
-  %wide.load13.8 = load <8 x i16>, <8 x i16>* %56, align 2
-  %57 = mul <8 x i16> %wide.load13.8, %wide.load.8
-  %58 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %57)
-  %59 = add i16 %58, %52
-  %60 = getelementptr inbounds i16, i16* %x, i32 72
-  %61 = bitcast i16* %60 to <8 x i16>*
-  %wide.load.9 = load <8 x i16>, <8 x i16>* %61, align 2
-  %62 = getelementptr inbounds i16, i16* %y, i32 72
-  %63 = bitcast i16* %62 to <8 x i16>*
-  %wide.load13.9 = load <8 x i16>, <8 x i16>* %63, align 2
-  %64 = mul <8 x i16> %wide.load13.9, %wide.load.9
+  %31 = add i16 %30, %26
+  %32 = getelementptr inbounds i16, ptr %x, i32 56
+  %wide.load.7 = load <8 x i16>, ptr %32, align 2
+  %33 = getelementptr inbounds i16, ptr %y, i32 56
+  %wide.load13.7 = load <8 x i16>, ptr %33, align 2
+  %34 = mul <8 x i16> %wide.load13.7, %wide.load.7
+  %35 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %34)
+  %36 = add i16 %35, %31
+  %37 = getelementptr inbounds i16, ptr %x, i32 64
+  %wide.load.8 = load <8 x i16>, ptr %37, align 2
+  %38 = getelementptr inbounds i16, ptr %y, i32 64
+  %wide.load13.8 = load <8 x i16>, ptr %38, align 2
+  %39 = mul <8 x i16> %wide.load13.8, %wide.load.8
+  %40 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %39)
+  %41 = add i16 %40, %36
+  %42 = getelementptr inbounds i16, ptr %x, i32 72
+  %wide.load.9 = load <8 x i16>, ptr %42, align 2
+  %43 = getelementptr inbounds i16, ptr %y, i32 72
+  %wide.load13.9 = load <8 x i16>, ptr %43, align 2
+  %44 = mul <8 x i16> %wide.load13.9, %wide.load.9
+  %45 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %44)
+  %46 = add i16 %45, %41
+  %47 = getelementptr inbounds i16, ptr %x, i32 80
+  %wide.load.10 = load <8 x i16>, ptr %47, align 2
+  %48 = getelementptr inbounds i16, ptr %y, i32 80
+  %wide.load13.10 = load <8 x i16>, ptr %48, align 2
+  %49 = mul <8 x i16> %wide.load13.10, %wide.load.10
+  %50 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %49)
+  %51 = add i16 %50, %46
+  %52 = getelementptr inbounds i16, ptr %x, i32 88
+  %wide.load.11 = load <8 x i16>, ptr %52, align 2
+  %53 = getelementptr inbounds i16, ptr %y, i32 88
+  %wide.load13.11 = load <8 x i16>, ptr %53, align 2
+  %54 = mul <8 x i16> %wide.load13.11, %wide.load.11
+  %55 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %54)
+  %56 = add i16 %55, %51
+  %57 = getelementptr inbounds i16, ptr %x, i32 96
+  %wide.load.12 = load <8 x i16>, ptr %57, align 2
+  %58 = getelementptr inbounds i16, ptr %y, i32 96
+  %wide.load13.12 = load <8 x i16>, ptr %58, align 2
+  %59 = mul <8 x i16> %wide.load13.12, %wide.load.12
+  %60 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %59)
+  %61 = add i16 %60, %56
+  %62 = getelementptr inbounds i16, ptr %x, i32 104
+  %wide.load.13 = load <8 x i16>, ptr %62, align 2
+  %63 = getelementptr inbounds i16, ptr %y, i32 104
+  %wide.load13.13 = load <8 x i16>, ptr %63, align 2
+  %64 = mul <8 x i16> %wide.load13.13, %wide.load.13
   %65 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %64)
-  %66 = add i16 %65, %59
-  %67 = getelementptr inbounds i16, i16* %x, i32 80
-  %68 = bitcast i16* %67 to <8 x i16>*
-  %wide.load.10 = load <8 x i16>, <8 x i16>* %68, align 2
-  %69 = getelementptr inbounds i16, i16* %y, i32 80
-  %70 = bitcast i16* %69 to <8 x i16>*
-  %wide.load13.10 = load <8 x i16>, <8 x i16>* %70, align 2
-  %71 = mul <8 x i16> %wide.load13.10, %wide.load.10
-  %72 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %71)
-  %73 = add i16 %72, %66
-  %74 = getelementptr inbounds i16, i16* %x, i32 88
-  %75 = bitcast i16* %74 to <8 x i16>*
-  %wide.load.11 = load <8 x i16>, <8 x i16>* %75, align 2
-  %76 = getelementptr inbounds i16, i16* %y, i32 88
-  %77 = bitcast i16* %76 to <8 x i16>*
-  %wide.load13.11 = load <8 x i16>, <8 x i16>* %77, align 2
-  %78 = mul <8 x i16> %wide.load13.11, %wide.load.11
-  %79 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %78)
-  %80 = add i16 %79, %73
-  %81 = getelementptr inbounds i16, i16* %x, i32 96
-  %82 = bitcast i16* %81 to <8 x i16>*
-  %wide.load.12 = load <8 x i16>, <8 x i16>* %82, align 2
-  %83 = getelementptr inbounds i16, i16* %y, i32 96
-  %84 = bitcast i16* %83 to <8 x i16>*
-  %wide.load13.12 = load <8 x i16>, <8 x i16>* %84, align 2
-  %85 = mul <8 x i16> %wide.load13.12, %wide.load.12
-  %86 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %85)
-  %87 = add i16 %86, %80
-  %88 = getelementptr inbounds i16, i16* %x, i32 104
-  %89 = bitcast i16* %88 to <8 x i16>*
-  %wide.load.13 = load <8 x i16>, <8 x i16>* %89, align 2
-  %90 = getelementptr inbounds i16, i16* %y, i32 104
-  %91 = bitcast i16* %90 to <8 x i16>*
-  %wide.load13.13 = load <8 x i16>, <8 x i16>* %91, align 2
-  %92 = mul <8 x i16> %wide.load13.13, %wide.load.13
-  %93 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %92)
-  %94 = add i16 %93, %87
-  %95 = getelementptr inbounds i16, i16* %x, i32 112
-  %96 = bitcast i16* %95 to <8 x i16>*
-  %wide.load.14 = load <8 x i16>, <8 x i16>* %96, align 2
-  %97 = getelementptr inbounds i16, i16* %y, i32 112
-  %98 = bitcast i16* %97 to <8 x i16>*
-  %wide.load13.14 = load <8 x i16>, <8 x i16>* %98, align 2
-  %99 = mul <8 x i16> %wide.load13.14, %wide.load.14
-  %100 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %99)
-  %101 = add i16 %100, %94
-  %102 = getelementptr inbounds i16, i16* %x, i32 120
-  %103 = bitcast i16* %102 to <8 x i16>*
-  %wide.load.15 = load <8 x i16>, <8 x i16>* %103, align 2
-  %104 = getelementptr inbounds i16, i16* %y, i32 120
-  %105 = bitcast i16* %104 to <8 x i16>*
-  %wide.load13.15 = load <8 x i16>, <8 x i16>* %105, align 2
-  %106 = mul <8 x i16> %wide.load13.15, %wide.load.15
-  %107 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %106)
-  %108 = add i16 %107, %101
-  ret i16 %108
-}
-
-define zeroext i8 @mlav2i8i8(i8* %x, i8* %y) {
+  %66 = add i16 %65, %61
+  %67 = getelementptr inbounds i16, ptr %x, i32 112
+  %wide.load.14 = load <8 x i16>, ptr %67, align 2
+  %68 = getelementptr inbounds i16, ptr %y, i32 112
+  %wide.load13.14 = load <8 x i16>, ptr %68, align 2
+  %69 = mul <8 x i16> %wide.load13.14, %wide.load.14
+  %70 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %69)
+  %71 = add i16 %70, %66
+  %72 = getelementptr inbounds i16, ptr %x, i32 120
+  %wide.load.15 = load <8 x i16>, ptr %72, align 2
+  %73 = getelementptr inbounds i16, ptr %y, i32 120
+  %wide.load13.15 = load <8 x i16>, ptr %73, align 2
+  %74 = mul <8 x i16> %wide.load13.15, %wide.load.15
+  %75 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %74)
+  %76 = add i16 %75, %71
+  ret i16 %76
+}
+
+define zeroext i8 @mlav2i8i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav2i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrb r2, [r0]
@@ -3499,19 +3114,19 @@ define zeroext i8 @mlav2i8i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    uxtb r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = load i8, i8* %x, align 1
-  %1 = load i8, i8* %y, align 1
+  %0 = load i8, ptr %x, align 1
+  %1 = load i8, ptr %y, align 1
   %mul = mul i8 %1, %0
-  %arrayidx.1 = getelementptr inbounds i8, i8* %x, i32 1
-  %2 = load i8, i8* %arrayidx.1, align 1
-  %arrayidx1.1 = getelementptr inbounds i8, i8* %y, i32 1
-  %3 = load i8, i8* %arrayidx1.1, align 1
+  %arrayidx.1 = getelementptr inbounds i8, ptr %x, i32 1
+  %2 = load i8, ptr %arrayidx.1, align 1
+  %arrayidx1.1 = getelementptr inbounds i8, ptr %y, i32 1
+  %3 = load i8, ptr %arrayidx1.1, align 1
   %mul.1 = mul i8 %3, %2
   %add.1 = add i8 %mul.1, %mul
   ret i8 %add.1
 }
 
-define zeroext i8 @mlav4i8i8(i8* %x, i8* %y) {
+define zeroext i8 @mlav4i8i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav4i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0]
@@ -3520,16 +3135,14 @@ define zeroext i8 @mlav4i8i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    uxtb r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <4 x i8>*
-  %3 = load <4 x i8>, <4 x i8>* %2, align 1
-  %4 = mul <4 x i8> %3, %1
-  %5 = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> %4)
-  ret i8 %5
+  %0 = load <4 x i8>, ptr %x, align 1
+  %1 = load <4 x i8>, ptr %y, align 1
+  %2 = mul <4 x i8> %1, %0
+  %3 = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> %2)
+  ret i8 %3
 }
 
-define zeroext i8 @mlav8i8i8(i8* %x, i8* %y) {
+define zeroext i8 @mlav8i8i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav8i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0]
@@ -3538,16 +3151,14 @@ define zeroext i8 @mlav8i8i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    uxtb r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  %3 = load <8 x i8>, <8 x i8>* %2, align 1
-  %4 = mul <8 x i8> %3, %1
-  %5 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %4)
-  ret i8 %5
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = load <8 x i8>, ptr %y, align 1
+  %2 = mul <8 x i8> %1, %0
+  %3 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %2)
+  ret i8 %3
 }
 
-define zeroext i8 @mlav16i8i8(i8* %x, i8* %y) {
+define zeroext i8 @mlav16i8i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav16i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -3556,16 +3167,14 @@ define zeroext i8 @mlav16i8i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    uxtb r0, r0
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <16 x i8>*
-  %3 = load <16 x i8>, <16 x i8>* %2, align 1
-  %4 = mul <16 x i8> %3, %1
-  %5 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %4)
-  ret i8 %5
+  %0 = load <16 x i8>, ptr %x, align 1
+  %1 = load <16 x i8>, ptr %y, align 1
+  %2 = mul <16 x i8> %1, %0
+  %3 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %2)
+  ret i8 %3
 }
 
-define zeroext i8 @mlav24i8i8(i8* %x, i8* %y) {
+define zeroext i8 @mlav24i8i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav24i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0]
@@ -3577,25 +3186,21 @@ define zeroext i8 @mlav24i8i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    uxtb r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <8 x i8>*
-  %3 = load <8 x i8>, <8 x i8>* %2, align 1
-  %4 = mul <8 x i8> %3, %1
-  %arrayidx.8 = getelementptr inbounds i8, i8* %x, i32 8
-  %arrayidx1.8 = getelementptr inbounds i8, i8* %y, i32 8
-  %5 = bitcast i8* %arrayidx.8 to <16 x i8>*
-  %6 = load <16 x i8>, <16 x i8>* %5, align 1
-  %7 = bitcast i8* %arrayidx1.8 to <16 x i8>*
-  %8 = load <16 x i8>, <16 x i8>* %7, align 1
-  %9 = mul <16 x i8> %8, %6
-  %10 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %9)
-  %11 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %4)
-  %op.rdx = add i8 %10, %11
+  %0 = load <8 x i8>, ptr %x, align 1
+  %1 = load <8 x i8>, ptr %y, align 1
+  %2 = mul <8 x i8> %1, %0
+  %arrayidx.8 = getelementptr inbounds i8, ptr %x, i32 8
+  %arrayidx1.8 = getelementptr inbounds i8, ptr %y, i32 8
+  %3 = load <16 x i8>, ptr %arrayidx.8, align 1
+  %4 = load <16 x i8>, ptr %arrayidx1.8, align 1
+  %5 = mul <16 x i8> %4, %3
+  %6 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %5)
+  %7 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %2)
+  %op.rdx = add i8 %6, %7
   ret i8 %op.rdx
 }
 
-define zeroext i8 @mlav32i8i8(i8* %x, i8* %y) {
+define zeroext i8 @mlav32i8i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav32i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -3607,16 +3212,14 @@ define zeroext i8 @mlav32i8i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    uxtb r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <32 x i8>*
-  %1 = load <32 x i8>, <32 x i8>* %0, align 1
-  %2 = bitcast i8* %y to <32 x i8>*
-  %3 = load <32 x i8>, <32 x i8>* %2, align 1
-  %4 = mul <32 x i8> %3, %1
-  %5 = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %4)
-  ret i8 %5
+  %0 = load <32 x i8>, ptr %x, align 1
+  %1 = load <32 x i8>, ptr %y, align 1
+  %2 = mul <32 x i8> %1, %0
+  %3 = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %2)
+  ret i8 %3
 }
 
-define zeroext i8 @mlav64i8i8(i8* %x, i8* %y) {
+define zeroext i8 @mlav64i8i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav64i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -3634,43 +3237,35 @@ define zeroext i8 @mlav64i8i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    uxtb r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %0, align 1
-  %1 = bitcast i8* %y to <16 x i8>*
-  %wide.load12 = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = mul <16 x i8> %wide.load12, %wide.load
-  %3 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %2)
-  %4 = getelementptr inbounds i8, i8* %x, i32 16
-  %5 = bitcast i8* %4 to <16 x i8>*
-  %wide.load.1 = load <16 x i8>, <16 x i8>* %5, align 1
-  %6 = getelementptr inbounds i8, i8* %y, i32 16
-  %7 = bitcast i8* %6 to <16 x i8>*
-  %wide.load12.1 = load <16 x i8>, <16 x i8>* %7, align 1
-  %8 = mul <16 x i8> %wide.load12.1, %wide.load.1
-  %9 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %8)
-  %10 = add i8 %9, %3
-  %11 = getelementptr inbounds i8, i8* %x, i32 32
-  %12 = bitcast i8* %11 to <16 x i8>*
-  %wide.load.2 = load <16 x i8>, <16 x i8>* %12, align 1
-  %13 = getelementptr inbounds i8, i8* %y, i32 32
-  %14 = bitcast i8* %13 to <16 x i8>*
-  %wide.load12.2 = load <16 x i8>, <16 x i8>* %14, align 1
-  %15 = mul <16 x i8> %wide.load12.2, %wide.load.2
-  %16 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %15)
-  %17 = add i8 %16, %10
-  %18 = getelementptr inbounds i8, i8* %x, i32 48
-  %19 = bitcast i8* %18 to <16 x i8>*
-  %wide.load.3 = load <16 x i8>, <16 x i8>* %19, align 1
-  %20 = getelementptr inbounds i8, i8* %y, i32 48
-  %21 = bitcast i8* %20 to <16 x i8>*
-  %wide.load12.3 = load <16 x i8>, <16 x i8>* %21, align 1
-  %22 = mul <16 x i8> %wide.load12.3, %wide.load.3
-  %23 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %22)
-  %24 = add i8 %23, %17
-  ret i8 %24
-}
-
-define zeroext i8 @mlav128i8i8(i8* %x, i8* %y) {
+  %wide.load = load <16 x i8>, ptr %x, align 1
+  %wide.load12 = load <16 x i8>, ptr %y, align 1
+  %0 = mul <16 x i8> %wide.load12, %wide.load
+  %1 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %0)
+  %2 = getelementptr inbounds i8, ptr %x, i32 16
+  %wide.load.1 = load <16 x i8>, ptr %2, align 1
+  %3 = getelementptr inbounds i8, ptr %y, i32 16
+  %wide.load12.1 = load <16 x i8>, ptr %3, align 1
+  %4 = mul <16 x i8> %wide.load12.1, %wide.load.1
+  %5 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %4)
+  %6 = add i8 %5, %1
+  %7 = getelementptr inbounds i8, ptr %x, i32 32
+  %wide.load.2 = load <16 x i8>, ptr %7, align 1
+  %8 = getelementptr inbounds i8, ptr %y, i32 32
+  %wide.load12.2 = load <16 x i8>, ptr %8, align 1
+  %9 = mul <16 x i8> %wide.load12.2, %wide.load.2
+  %10 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %9)
+  %11 = add i8 %10, %6
+  %12 = getelementptr inbounds i8, ptr %x, i32 48
+  %wide.load.3 = load <16 x i8>, ptr %12, align 1
+  %13 = getelementptr inbounds i8, ptr %y, i32 48
+  %wide.load12.3 = load <16 x i8>, ptr %13, align 1
+  %14 = mul <16 x i8> %wide.load12.3, %wide.load.3
+  %15 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %14)
+  %16 = add i8 %15, %11
+  ret i8 %16
+}
+
+define zeroext i8 @mlav128i8i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: mlav128i8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -3700,76 +3295,60 @@ define zeroext i8 @mlav128i8i8(i8* %x, i8* %y) {
 ; CHECK-NEXT:    uxtb r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast i8* %x to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %0, align 1
-  %1 = bitcast i8* %y to <16 x i8>*
-  %wide.load12 = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = mul <16 x i8> %wide.load12, %wide.load
-  %3 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %2)
-  %4 = getelementptr inbounds i8, i8* %x, i32 16
-  %5 = bitcast i8* %4 to <16 x i8>*
-  %wide.load.1 = load <16 x i8>, <16 x i8>* %5, align 1
-  %6 = getelementptr inbounds i8, i8* %y, i32 16
-  %7 = bitcast i8* %6 to <16 x i8>*
-  %wide.load12.1 = load <16 x i8>, <16 x i8>* %7, align 1
-  %8 = mul <16 x i8> %wide.load12.1, %wide.load.1
-  %9 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %8)
-  %10 = add i8 %9, %3
-  %11 = getelementptr inbounds i8, i8* %x, i32 32
-  %12 = bitcast i8* %11 to <16 x i8>*
-  %wide.load.2 = load <16 x i8>, <16 x i8>* %12, align 1
-  %13 = getelementptr inbounds i8, i8* %y, i32 32
-  %14 = bitcast i8* %13 to <16 x i8>*
-  %wide.load12.2 = load <16 x i8>, <16 x i8>* %14, align 1
-  %15 = mul <16 x i8> %wide.load12.2, %wide.load.2
-  %16 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %15)
-  %17 = add i8 %16, %10
-  %18 = getelementptr inbounds i8, i8* %x, i32 48
-  %19 = bitcast i8* %18 to <16 x i8>*
-  %wide.load.3 = load <16 x i8>, <16 x i8>* %19, align 1
-  %20 = getelementptr inbounds i8, i8* %y, i32 48
-  %21 = bitcast i8* %20 to <16 x i8>*
-  %wide.load12.3 = load <16 x i8>, <16 x i8>* %21, align 1
-  %22 = mul <16 x i8> %wide.load12.3, %wide.load.3
-  %23 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %22)
-  %24 = add i8 %23, %17
-  %25 = getelementptr inbounds i8, i8* %x, i32 64
-  %26 = bitcast i8* %25 to <16 x i8>*
-  %wide.load.4 = load <16 x i8>, <16 x i8>* %26, align 1
-  %27 = getelementptr inbounds i8, i8* %y, i32 64
-  %28 = bitcast i8* %27 to <16 x i8>*
-  %wide.load12.4 = load <16 x i8>, <16 x i8>* %28, align 1
-  %29 = mul <16 x i8> %wide.load12.4, %wide.load.4
+  %wide.load = load <16 x i8>, ptr %x, align 1
+  %wide.load12 = load <16 x i8>, ptr %y, align 1
+  %0 = mul <16 x i8> %wide.load12, %wide.load
+  %1 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %0)
+  %2 = getelementptr inbounds i8, ptr %x, i32 16
+  %wide.load.1 = load <16 x i8>, ptr %2, align 1
+  %3 = getelementptr inbounds i8, ptr %y, i32 16
+  %wide.load12.1 = load <16 x i8>, ptr %3, align 1
+  %4 = mul <16 x i8> %wide.load12.1, %wide.load.1
+  %5 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %4)
+  %6 = add i8 %5, %1
+  %7 = getelementptr inbounds i8, ptr %x, i32 32
+  %wide.load.2 = load <16 x i8>, ptr %7, align 1
+  %8 = getelementptr inbounds i8, ptr %y, i32 32
+  %wide.load12.2 = load <16 x i8>, ptr %8, align 1
+  %9 = mul <16 x i8> %wide.load12.2, %wide.load.2
+  %10 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %9)
+  %11 = add i8 %10, %6
+  %12 = getelementptr inbounds i8, ptr %x, i32 48
+  %wide.load.3 = load <16 x i8>, ptr %12, align 1
+  %13 = getelementptr inbounds i8, ptr %y, i32 48
+  %wide.load12.3 = load <16 x i8>, ptr %13, align 1
+  %14 = mul <16 x i8> %wide.load12.3, %wide.load.3
+  %15 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %14)
+  %16 = add i8 %15, %11
+  %17 = getelementptr inbounds i8, ptr %x, i32 64
+  %wide.load.4 = load <16 x i8>, ptr %17, align 1
+  %18 = getelementptr inbounds i8, ptr %y, i32 64
+  %wide.load12.4 = load <16 x i8>, ptr %18, align 1
+  %19 = mul <16 x i8> %wide.load12.4, %wide.load.4
+  %20 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %19)
+  %21 = add i8 %20, %16
+  %22 = getelementptr inbounds i8, ptr %x, i32 80
+  %wide.load.5 = load <16 x i8>, ptr %22, align 1
+  %23 = getelementptr inbounds i8, ptr %y, i32 80
+  %wide.load12.5 = load <16 x i8>, ptr %23, align 1
+  %24 = mul <16 x i8> %wide.load12.5, %wide.load.5
+  %25 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %24)
+  %26 = add i8 %25, %21
+  %27 = getelementptr inbounds i8, ptr %x, i32 96
+  %wide.load.6 = load <16 x i8>, ptr %27, align 1
+  %28 = getelementptr inbounds i8, ptr %y, i32 96
+  %wide.load12.6 = load <16 x i8>, ptr %28, align 1
+  %29 = mul <16 x i8> %wide.load12.6, %wide.load.6
   %30 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %29)
-  %31 = add i8 %30, %24
-  %32 = getelementptr inbounds i8, i8* %x, i32 80
-  %33 = bitcast i8* %32 to <16 x i8>*
-  %wide.load.5 = load <16 x i8>, <16 x i8>* %33, align 1
-  %34 = getelementptr inbounds i8, i8* %y, i32 80
-  %35 = bitcast i8* %34 to <16 x i8>*
-  %wide.load12.5 = load <16 x i8>, <16 x i8>* %35, align 1
-  %36 = mul <16 x i8> %wide.load12.5, %wide.load.5
-  %37 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %36)
-  %38 = add i8 %37, %31
-  %39 = getelementptr inbounds i8, i8* %x, i32 96
-  %40 = bitcast i8* %39 to <16 x i8>*
-  %wide.load.6 = load <16 x i8>, <16 x i8>* %40, align 1
-  %41 = getelementptr inbounds i8, i8* %y, i32 96
-  %42 = bitcast i8* %41 to <16 x i8>*
-  %wide.load12.6 = load <16 x i8>, <16 x i8>* %42, align 1
-  %43 = mul <16 x i8> %wide.load12.6, %wide.load.6
-  %44 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %43)
-  %45 = add i8 %44, %38
-  %46 = getelementptr inbounds i8, i8* %x, i32 112
-  %47 = bitcast i8* %46 to <16 x i8>*
-  %wide.load.7 = load <16 x i8>, <16 x i8>* %47, align 1
-  %48 = getelementptr inbounds i8, i8* %y, i32 112
-  %49 = bitcast i8* %48 to <16 x i8>*
-  %wide.load12.7 = load <16 x i8>, <16 x i8>* %49, align 1
-  %50 = mul <16 x i8> %wide.load12.7, %wide.load.7
-  %51 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %50)
-  %52 = add i8 %51, %45
-  ret i8 %52
+  %31 = add i8 %30, %26
+  %32 = getelementptr inbounds i8, ptr %x, i32 112
+  %wide.load.7 = load <16 x i8>, ptr %32, align 1
+  %33 = getelementptr inbounds i8, ptr %y, i32 112
+  %wide.load12.7 = load <16 x i8>, ptr %33, align 1
+  %34 = mul <16 x i8> %wide.load12.7, %wide.load.7
+  %35 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %34)
+  %36 = add i8 %35, %31
+  ret i8 %36
 }
 
 

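Editorial note: the change in each of the functions above is mechanical. Under opaque pointers the bitcast from an element pointer to a vector pointer type is redundant, so each bitcast/load (or bitcast/store) pair collapses into a single memory access through ptr, and the unnamed values (%0, %1, ...) renumber downward accordingly. A minimal before/after sketch of the pattern, with a hypothetical function name that is not taken from this patch:

  ; Typed pointers (old form): a bitcast precedes the vector load.
  define <4 x i32> @load_example_typed(i32* %p) {
    %0 = bitcast i32* %p to <4 x i32>*
    %1 = load <4 x i32>, <4 x i32>* %0, align 4
    ret <4 x i32> %1
  }

  ; Opaque pointers (new form): the load takes the ptr operand directly.
  define <4 x i32> @load_example_opaque(ptr %p) {
    %0 = load <4 x i32>, ptr %p, align 4
    ret <4 x i32> %0
  }

(The two forms cannot coexist in one module; they show the same function before and after conversion. The generated MVE code is unchanged, which is why the CHECK lines above are untouched.)
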
diff  --git a/llvm/test/CodeGen/Thumb2/mve-vector-spill.ll b/llvm/test/CodeGen/Thumb2/mve-vector-spill.ll
index 647ad2e8182e8..0af77c4fc8283 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vector-spill.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vector-spill.ll
@@ -3,7 +3,7 @@
 
 declare void @external_function()
 
-define arm_aapcs_vfpcc void @spill_vector_i32(<4 x i32> %v, <4 x i32>* %p) {
+define arm_aapcs_vfpcc void @spill_vector_i32(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: spill_vector_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -20,11 +20,11 @@ define arm_aapcs_vfpcc void @spill_vector_i32(<4 x i32> %v, <4 x i32>* %p) {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   call void @external_function()
-  store <4 x i32> %v, <4 x i32>* %p, align 4
+  store <4 x i32> %v, ptr %p, align 4
   ret void
 }
 
-define arm_aapcs_vfpcc void @spill_vector_i16(<8 x i16> %v, <8 x i16>* %p) {
+define arm_aapcs_vfpcc void @spill_vector_i16(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: spill_vector_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -41,11 +41,11 @@ define arm_aapcs_vfpcc void @spill_vector_i16(<8 x i16> %v, <8 x i16>* %p) {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   call void @external_function()
-  store <8 x i16> %v, <8 x i16>* %p, align 2
+  store <8 x i16> %v, ptr %p, align 2
   ret void
 }
 
-define arm_aapcs_vfpcc void @spill_vector_i8(<16 x i8> %v, <16 x i8>* %p) {
+define arm_aapcs_vfpcc void @spill_vector_i8(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: spill_vector_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -62,11 +62,11 @@ define arm_aapcs_vfpcc void @spill_vector_i8(<16 x i8> %v, <16 x i8>* %p) {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   call void @external_function()
-  store <16 x i8> %v, <16 x i8>* %p, align 1
+  store <16 x i8> %v, ptr %p, align 1
   ret void
 }
 
-define arm_aapcs_vfpcc void @spill_vector_i64(<2 x i64> %v, <2 x i64>* %p) {
+define arm_aapcs_vfpcc void @spill_vector_i64(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: spill_vector_i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -83,11 +83,11 @@ define arm_aapcs_vfpcc void @spill_vector_i64(<2 x i64> %v, <2 x i64>* %p) {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   call void @external_function()
-  store <2 x i64> %v, <2 x i64>* %p, align 8
+  store <2 x i64> %v, ptr %p, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @spill_vector_f32(<4 x float> %v, <4 x float>* %p) {
+define arm_aapcs_vfpcc void @spill_vector_f32(<4 x float> %v, ptr %p) {
 ; CHECK-LABEL: spill_vector_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -104,11 +104,11 @@ define arm_aapcs_vfpcc void @spill_vector_f32(<4 x float> %v, <4 x float>* %p) {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   call void @external_function()
-  store <4 x float> %v, <4 x float>* %p, align 8
+  store <4 x float> %v, ptr %p, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @spill_vector_f16(<8 x half> %v, <8 x half>* %p) {
+define arm_aapcs_vfpcc void @spill_vector_f16(<8 x half> %v, ptr %p) {
 ; CHECK-LABEL: spill_vector_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -125,11 +125,11 @@ define arm_aapcs_vfpcc void @spill_vector_f16(<8 x half> %v, <8 x half>* %p) {
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   call void @external_function()
-  store <8 x half> %v, <8 x half>* %p, align 8
+  store <8 x half> %v, ptr %p, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @spill_vector_f64(<2 x double> %v, <2 x double>* %p) {
+define arm_aapcs_vfpcc void @spill_vector_f64(<2 x double> %v, ptr %p) {
 ; CHECK-LABEL: spill_vector_f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -146,6 +146,6 @@ define arm_aapcs_vfpcc void @spill_vector_f64(<2 x double> %v, <2 x double>* %p)
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
   call void @external_function()
-  store <2 x double> %v, <2 x double>* %p, align 8
+  store <2 x double> %v, ptr %p, align 8
   ret void
 }

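A second point worth noting before the loop tests below: getelementptr instructions are unaffected apart from their pointer operand type, because a GEP carries its source element type explicitly. The byte offsets in these tests are therefore identical before and after the conversion; for example, indexing i16 by 8 still advances 16 bytes. A short sketch, again with a hypothetical name:

  ; The explicit i16 source element type keeps the offset arithmetic
  ; unchanged: index 8 of i16 is a 16-byte advance from %p.
  define ptr @gep_example(ptr %p) {
    %q = getelementptr inbounds i16, ptr %p, i32 8
    ret ptr %q
  }

One visible side effect of dropping the bitcasts is renumbering of unnamed values: in the vectorized loop bodies below, the loop-exit compare moves from %11 to %8 because the three bitcast results per iteration disappear.
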
diff  --git a/llvm/test/CodeGen/Thumb2/mve-vhadd.ll b/llvm/test/CodeGen/Thumb2/mve-vhadd.ll
index 3b3c5c704a59e..248a929e858bf 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vhadd.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vhadd.ll
@@ -408,7 +408,7 @@ entry:
 
 
 
-define void @vhadd_loop_s8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8* noalias nocapture %z, i32 %n) {
+define void @vhadd_loop_s8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vhadd_loop_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -428,29 +428,26 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %x, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = sext <16 x i8> %wide.load to <16 x i16>
-  %3 = getelementptr inbounds i8, i8* %y, i32 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load16 = load <16 x i8>, <16 x i8>* %4, align 1
-  %5 = sext <16 x i8> %wide.load16 to <16 x i16>
-  %6 = add nsw <16 x i16> %5, %2
-  %7 = lshr <16 x i16> %6, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %8 = trunc <16 x i16> %7 to <16 x i8>
-  %9 = getelementptr inbounds i8, i8* %z, i32 %index
-  %10 = bitcast i8* %9 to <16 x i8>*
-  store <16 x i8> %8, <16 x i8>* %10, align 1
+  %0 = getelementptr inbounds i8, ptr %x, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 1
+  %1 = sext <16 x i8> %wide.load to <16 x i16>
+  %2 = getelementptr inbounds i8, ptr %y, i32 %index
+  %wide.load16 = load <16 x i8>, ptr %2, align 1
+  %3 = sext <16 x i8> %wide.load16 to <16 x i16>
+  %4 = add nsw <16 x i16> %3, %1
+  %5 = lshr <16 x i16> %4, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %6 = trunc <16 x i16> %5 to <16 x i8>
+  %7 = getelementptr inbounds i8, ptr %z, i32 %index
+  store <16 x i8> %6, ptr %7, align 1
   %index.next = add i32 %index, 16
-  %11 = icmp eq i32 %index.next, 1024
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, 1024
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vhadd_loop_s16(i16* nocapture readonly %x, i16* nocapture readonly %y, i16* noalias nocapture %z, i32 %n) {
+define void @vhadd_loop_s16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vhadd_loop_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -470,29 +467,26 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %x, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = sext <8 x i16> %wide.load to <8 x i32>
-  %3 = getelementptr inbounds i16, i16* %y, i32 %index
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.load16 = load <8 x i16>, <8 x i16>* %4, align 2
-  %5 = sext <8 x i16> %wide.load16 to <8 x i32>
-  %6 = add nsw <8 x i32> %5, %2
-  %7 = lshr <8 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %8 = trunc <8 x i32> %7 to <8 x i16>
-  %9 = getelementptr inbounds i16, i16* %z, i32 %index
-  %10 = bitcast i16* %9 to <8 x i16>*
-  store <8 x i16> %8, <8 x i16>* %10, align 2
+  %0 = getelementptr inbounds i16, ptr %x, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = sext <8 x i16> %wide.load to <8 x i32>
+  %2 = getelementptr inbounds i16, ptr %y, i32 %index
+  %wide.load16 = load <8 x i16>, ptr %2, align 2
+  %3 = sext <8 x i16> %wide.load16 to <8 x i32>
+  %4 = add nsw <8 x i32> %3, %1
+  %5 = lshr <8 x i32> %4, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %6 = trunc <8 x i32> %5 to <8 x i16>
+  %7 = getelementptr inbounds i16, ptr %z, i32 %index
+  store <8 x i16> %6, ptr %7, align 2
   %index.next = add i32 %index, 8
-  %11 = icmp eq i32 %index.next, 1024
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, 1024
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vhadd_loop_s32(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* noalias nocapture %z, i32 %n) {
+define void @vhadd_loop_s32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vhadd_loop_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -512,29 +506,26 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = sext <4 x i32> %wide.load to <4 x i64>
-  %3 = getelementptr inbounds i32, i32* %y, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.load16 = load <4 x i32>, <4 x i32>* %4, align 4
-  %5 = sext <4 x i32> %wide.load16 to <4 x i64>
-  %6 = add nsw <4 x i64> %5, %2
-  %7 = lshr <4 x i64> %6, <i64 1, i64 1, i64 1, i64 1>
-  %8 = trunc <4 x i64> %7 to <4 x i32>
-  %9 = getelementptr inbounds i32, i32* %z, i32 %index
-  %10 = bitcast i32* %9 to <4 x i32>*
-  store <4 x i32> %8, <4 x i32>* %10, align 4
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = sext <4 x i32> %wide.load to <4 x i64>
+  %2 = getelementptr inbounds i32, ptr %y, i32 %index
+  %wide.load16 = load <4 x i32>, ptr %2, align 4
+  %3 = sext <4 x i32> %wide.load16 to <4 x i64>
+  %4 = add nsw <4 x i64> %3, %1
+  %5 = lshr <4 x i64> %4, <i64 1, i64 1, i64 1, i64 1>
+  %6 = trunc <4 x i64> %5 to <4 x i32>
+  %7 = getelementptr inbounds i32, ptr %z, i32 %index
+  store <4 x i32> %6, ptr %7, align 4
   %index.next = add i32 %index, 4
-  %11 = icmp eq i32 %index.next, 1024
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, 1024
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vhadd_loop_u8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8* noalias nocapture %z, i32 %n) {
+define void @vhadd_loop_u8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vhadd_loop_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -554,29 +545,26 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %x, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = zext <16 x i8> %wide.load to <16 x i16>
-  %3 = getelementptr inbounds i8, i8* %y, i32 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load16 = load <16 x i8>, <16 x i8>* %4, align 1
-  %5 = zext <16 x i8> %wide.load16 to <16 x i16>
-  %6 = add nuw nsw <16 x i16> %5, %2
-  %7 = lshr <16 x i16> %6, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %8 = trunc <16 x i16> %7 to <16 x i8>
-  %9 = getelementptr inbounds i8, i8* %z, i32 %index
-  %10 = bitcast i8* %9 to <16 x i8>*
-  store <16 x i8> %8, <16 x i8>* %10, align 1
+  %0 = getelementptr inbounds i8, ptr %x, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 1
+  %1 = zext <16 x i8> %wide.load to <16 x i16>
+  %2 = getelementptr inbounds i8, ptr %y, i32 %index
+  %wide.load16 = load <16 x i8>, ptr %2, align 1
+  %3 = zext <16 x i8> %wide.load16 to <16 x i16>
+  %4 = add nuw nsw <16 x i16> %3, %1
+  %5 = lshr <16 x i16> %4, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %6 = trunc <16 x i16> %5 to <16 x i8>
+  %7 = getelementptr inbounds i8, ptr %z, i32 %index
+  store <16 x i8> %6, ptr %7, align 1
   %index.next = add i32 %index, 16
-  %11 = icmp eq i32 %index.next, 1024
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, 1024
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vhadd_loop_u16(i16* nocapture readonly %x, i16* nocapture readonly %y, i16* noalias nocapture %z, i32 %n) {
+define void @vhadd_loop_u16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vhadd_loop_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -596,29 +584,26 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %x, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = zext <8 x i16> %wide.load to <8 x i32>
-  %3 = getelementptr inbounds i16, i16* %y, i32 %index
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.load16 = load <8 x i16>, <8 x i16>* %4, align 2
-  %5 = zext <8 x i16> %wide.load16 to <8 x i32>
-  %6 = add nuw nsw <8 x i32> %5, %2
-  %7 = lshr <8 x i32> %6, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %8 = trunc <8 x i32> %7 to <8 x i16>
-  %9 = getelementptr inbounds i16, i16* %z, i32 %index
-  %10 = bitcast i16* %9 to <8 x i16>*
-  store <8 x i16> %8, <8 x i16>* %10, align 2
+  %0 = getelementptr inbounds i16, ptr %x, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = zext <8 x i16> %wide.load to <8 x i32>
+  %2 = getelementptr inbounds i16, ptr %y, i32 %index
+  %wide.load16 = load <8 x i16>, ptr %2, align 2
+  %3 = zext <8 x i16> %wide.load16 to <8 x i32>
+  %4 = add nuw nsw <8 x i32> %3, %1
+  %5 = lshr <8 x i32> %4, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %6 = trunc <8 x i32> %5 to <8 x i16>
+  %7 = getelementptr inbounds i16, ptr %z, i32 %index
+  store <8 x i16> %6, ptr %7, align 2
   %index.next = add i32 %index, 8
-  %11 = icmp eq i32 %index.next, 1024
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, 1024
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vhadd_loop_u32(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* noalias nocapture %z, i32 %n) {
+define void @vhadd_loop_u32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vhadd_loop_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -638,29 +623,26 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = zext <4 x i32> %wide.load to <4 x i64>
-  %3 = getelementptr inbounds i32, i32* %y, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.load16 = load <4 x i32>, <4 x i32>* %4, align 4
-  %5 = zext <4 x i32> %wide.load16 to <4 x i64>
-  %6 = add nuw nsw <4 x i64> %5, %2
-  %7 = lshr <4 x i64> %6, <i64 1, i64 1, i64 1, i64 1>
-  %8 = trunc <4 x i64> %7 to <4 x i32>
-  %9 = getelementptr inbounds i32, i32* %z, i32 %index
-  %10 = bitcast i32* %9 to <4 x i32>*
-  store <4 x i32> %8, <4 x i32>* %10, align 4
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = zext <4 x i32> %wide.load to <4 x i64>
+  %2 = getelementptr inbounds i32, ptr %y, i32 %index
+  %wide.load16 = load <4 x i32>, ptr %2, align 4
+  %3 = zext <4 x i32> %wide.load16 to <4 x i64>
+  %4 = add nuw nsw <4 x i64> %3, %1
+  %5 = lshr <4 x i64> %4, <i64 1, i64 1, i64 1, i64 1>
+  %6 = trunc <4 x i64> %5 to <4 x i32>
+  %7 = getelementptr inbounds i32, ptr %z, i32 %index
+  store <4 x i32> %6, ptr %7, align 4
   %index.next = add i32 %index, 4
-  %11 = icmp eq i32 %index.next, 1024
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, 1024
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vrhadd_loop_s8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8* noalias nocapture %z, i32 %n) {
+define void @vrhadd_loop_s8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vrhadd_loop_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -680,30 +662,27 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %x, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = zext <16 x i8> %wide.load to <16 x i16>
-  %3 = getelementptr inbounds i8, i8* %y, i32 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load16 = load <16 x i8>, <16 x i8>* %4, align 1
-  %5 = zext <16 x i8> %wide.load16 to <16 x i16>
-  %6 = add nuw nsw <16 x i16> %2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %7 = add nuw nsw <16 x i16> %6, %5
-  %8 = lshr <16 x i16> %7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %9 = trunc <16 x i16> %8 to <16 x i8>
-  %10 = getelementptr inbounds i8, i8* %z, i32 %index
-  %11 = bitcast i8* %10 to <16 x i8>*
-  store <16 x i8> %9, <16 x i8>* %11, align 1
+  %0 = getelementptr inbounds i8, ptr %x, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 1
+  %1 = zext <16 x i8> %wide.load to <16 x i16>
+  %2 = getelementptr inbounds i8, ptr %y, i32 %index
+  %wide.load16 = load <16 x i8>, ptr %2, align 1
+  %3 = zext <16 x i8> %wide.load16 to <16 x i16>
+  %4 = add nuw nsw <16 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %5 = add nuw nsw <16 x i16> %4, %3
+  %6 = lshr <16 x i16> %5, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %7 = trunc <16 x i16> %6 to <16 x i8>
+  %8 = getelementptr inbounds i8, ptr %z, i32 %index
+  store <16 x i8> %7, ptr %8, align 1
   %index.next = add i32 %index, 16
-  %12 = icmp eq i32 %index.next, 1024
-  br i1 %12, label %for.cond.cleanup, label %vector.body
+  %9 = icmp eq i32 %index.next, 1024
+  br i1 %9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vrhadd_loop_s16(i16* nocapture readonly %x, i16* nocapture readonly %y, i16* noalias nocapture %z, i32 %n) {
+define void @vrhadd_loop_s16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vrhadd_loop_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -723,30 +702,27 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %x, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = zext <8 x i16> %wide.load to <8 x i32>
-  %3 = getelementptr inbounds i16, i16* %y, i32 %index
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.load16 = load <8 x i16>, <8 x i16>* %4, align 2
-  %5 = zext <8 x i16> %wide.load16 to <8 x i32>
-  %6 = add nuw nsw <8 x i32> %2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %7 = add nuw nsw <8 x i32> %6, %5
-  %8 = lshr <8 x i32> %7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %9 = trunc <8 x i32> %8 to <8 x i16>
-  %10 = getelementptr inbounds i16, i16* %z, i32 %index
-  %11 = bitcast i16* %10 to <8 x i16>*
-  store <8 x i16> %9, <8 x i16>* %11, align 2
+  %0 = getelementptr inbounds i16, ptr %x, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = zext <8 x i16> %wide.load to <8 x i32>
+  %2 = getelementptr inbounds i16, ptr %y, i32 %index
+  %wide.load16 = load <8 x i16>, ptr %2, align 2
+  %3 = zext <8 x i16> %wide.load16 to <8 x i32>
+  %4 = add nuw nsw <8 x i32> %1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %5 = add nuw nsw <8 x i32> %4, %3
+  %6 = lshr <8 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %7 = trunc <8 x i32> %6 to <8 x i16>
+  %8 = getelementptr inbounds i16, ptr %z, i32 %index
+  store <8 x i16> %7, ptr %8, align 2
   %index.next = add i32 %index, 8
-  %12 = icmp eq i32 %index.next, 1024
-  br i1 %12, label %for.cond.cleanup, label %vector.body
+  %9 = icmp eq i32 %index.next, 1024
+  br i1 %9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vrhadd_loop_s32(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* noalias nocapture %z, i32 %n) {
+define void @vrhadd_loop_s32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vrhadd_loop_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -766,30 +742,27 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = zext <4 x i32> %wide.load to <4 x i64>
-  %3 = getelementptr inbounds i32, i32* %y, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.load16 = load <4 x i32>, <4 x i32>* %4, align 4
-  %5 = zext <4 x i32> %wide.load16 to <4 x i64>
-  %6 = add nuw nsw <4 x i64> %2, <i64 1, i64 1, i64 1, i64 1>
-  %7 = add nuw nsw <4 x i64> %6, %5
-  %8 = lshr <4 x i64> %7, <i64 1, i64 1, i64 1, i64 1>
-  %9 = trunc <4 x i64> %8 to <4 x i32>
-  %10 = getelementptr inbounds i32, i32* %z, i32 %index
-  %11 = bitcast i32* %10 to <4 x i32>*
-  store <4 x i32> %9, <4 x i32>* %11, align 4
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = zext <4 x i32> %wide.load to <4 x i64>
+  %2 = getelementptr inbounds i32, ptr %y, i32 %index
+  %wide.load16 = load <4 x i32>, ptr %2, align 4
+  %3 = zext <4 x i32> %wide.load16 to <4 x i64>
+  %4 = add nuw nsw <4 x i64> %1, <i64 1, i64 1, i64 1, i64 1>
+  %5 = add nuw nsw <4 x i64> %4, %3
+  %6 = lshr <4 x i64> %5, <i64 1, i64 1, i64 1, i64 1>
+  %7 = trunc <4 x i64> %6 to <4 x i32>
+  %8 = getelementptr inbounds i32, ptr %z, i32 %index
+  store <4 x i32> %7, ptr %8, align 4
   %index.next = add i32 %index, 4
-  %12 = icmp eq i32 %index.next, 1024
-  br i1 %12, label %for.cond.cleanup, label %vector.body
+  %9 = icmp eq i32 %index.next, 1024
+  br i1 %9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vrhadd_loop_u8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8* noalias nocapture %z, i32 %n) {
+define void @vrhadd_loop_u8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vrhadd_loop_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -809,30 +782,27 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %x, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = zext <16 x i8> %wide.load to <16 x i16>
-  %3 = getelementptr inbounds i8, i8* %y, i32 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load16 = load <16 x i8>, <16 x i8>* %4, align 1
-  %5 = zext <16 x i8> %wide.load16 to <16 x i16>
-  %6 = add nuw nsw <16 x i16> %2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %7 = add nuw nsw <16 x i16> %6, %5
-  %8 = lshr <16 x i16> %7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %9 = trunc <16 x i16> %8 to <16 x i8>
-  %10 = getelementptr inbounds i8, i8* %z, i32 %index
-  %11 = bitcast i8* %10 to <16 x i8>*
-  store <16 x i8> %9, <16 x i8>* %11, align 1
+  %0 = getelementptr inbounds i8, ptr %x, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 1
+  %1 = zext <16 x i8> %wide.load to <16 x i16>
+  %2 = getelementptr inbounds i8, ptr %y, i32 %index
+  %wide.load16 = load <16 x i8>, ptr %2, align 1
+  %3 = zext <16 x i8> %wide.load16 to <16 x i16>
+  %4 = add nuw nsw <16 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %5 = add nuw nsw <16 x i16> %4, %3
+  %6 = lshr <16 x i16> %5, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %7 = trunc <16 x i16> %6 to <16 x i8>
+  %8 = getelementptr inbounds i8, ptr %z, i32 %index
+  store <16 x i8> %7, ptr %8, align 1
   %index.next = add i32 %index, 16
-  %12 = icmp eq i32 %index.next, 1024
-  br i1 %12, label %for.cond.cleanup, label %vector.body
+  %9 = icmp eq i32 %index.next, 1024
+  br i1 %9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vrhadd_loop_u16(i16* nocapture readonly %x, i16* nocapture readonly %y, i16* noalias nocapture %z, i32 %n) {
+define void @vrhadd_loop_u16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vrhadd_loop_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -852,30 +822,27 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %x, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = zext <8 x i16> %wide.load to <8 x i32>
-  %3 = getelementptr inbounds i16, i16* %y, i32 %index
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.load16 = load <8 x i16>, <8 x i16>* %4, align 2
-  %5 = zext <8 x i16> %wide.load16 to <8 x i32>
-  %6 = add nuw nsw <8 x i32> %2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %7 = add nuw nsw <8 x i32> %6, %5
-  %8 = lshr <8 x i32> %7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %9 = trunc <8 x i32> %8 to <8 x i16>
-  %10 = getelementptr inbounds i16, i16* %z, i32 %index
-  %11 = bitcast i16* %10 to <8 x i16>*
-  store <8 x i16> %9, <8 x i16>* %11, align 2
+  %0 = getelementptr inbounds i16, ptr %x, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = zext <8 x i16> %wide.load to <8 x i32>
+  %2 = getelementptr inbounds i16, ptr %y, i32 %index
+  %wide.load16 = load <8 x i16>, ptr %2, align 2
+  %3 = zext <8 x i16> %wide.load16 to <8 x i32>
+  %4 = add nuw nsw <8 x i32> %1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %5 = add nuw nsw <8 x i32> %4, %3
+  %6 = lshr <8 x i32> %5, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %7 = trunc <8 x i32> %6 to <8 x i16>
+  %8 = getelementptr inbounds i16, ptr %z, i32 %index
+  store <8 x i16> %7, ptr %8, align 2
   %index.next = add i32 %index, 8
-  %12 = icmp eq i32 %index.next, 1024
-  br i1 %12, label %for.cond.cleanup, label %vector.body
+  %9 = icmp eq i32 %index.next, 1024
+  br i1 %9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vrhadd_loop_u32(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* noalias nocapture %z, i32 %n) {
+define void @vrhadd_loop_u32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vrhadd_loop_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -895,24 +862,21 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = zext <4 x i32> %wide.load to <4 x i64>
-  %3 = getelementptr inbounds i32, i32* %y, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.load16 = load <4 x i32>, <4 x i32>* %4, align 4
-  %5 = zext <4 x i32> %wide.load16 to <4 x i64>
-  %6 = add nuw nsw <4 x i64> %2, <i64 1, i64 1, i64 1, i64 1>
-  %7 = add nuw nsw <4 x i64> %6, %5
-  %8 = lshr <4 x i64> %7, <i64 1, i64 1, i64 1, i64 1>
-  %9 = trunc <4 x i64> %8 to <4 x i32>
-  %10 = getelementptr inbounds i32, i32* %z, i32 %index
-  %11 = bitcast i32* %10 to <4 x i32>*
-  store <4 x i32> %9, <4 x i32>* %11, align 4
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = zext <4 x i32> %wide.load to <4 x i64>
+  %2 = getelementptr inbounds i32, ptr %y, i32 %index
+  %wide.load16 = load <4 x i32>, ptr %2, align 4
+  %3 = zext <4 x i32> %wide.load16 to <4 x i64>
+  %4 = add nuw nsw <4 x i64> %1, <i64 1, i64 1, i64 1, i64 1>
+  %5 = add nuw nsw <4 x i64> %4, %3
+  %6 = lshr <4 x i64> %5, <i64 1, i64 1, i64 1, i64 1>
+  %7 = trunc <4 x i64> %6 to <4 x i32>
+  %8 = getelementptr inbounds i32, ptr %z, i32 %index
+  store <4 x i32> %7, ptr %8, align 4
   %index.next = add i32 %index, 4
-  %12 = icmp eq i32 %index.next, 1024
-  br i1 %12, label %for.cond.cleanup, label %vector.body
+  %9 = icmp eq i32 %index.next, 1024
+  br i1 %9, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void

diff --git a/llvm/test/CodeGen/Thumb2/mve-vld2-post.ll b/llvm/test/CodeGen/Thumb2/mve-vld2-post.ll
index 178eb8f831508..43ef7994fcec9 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld2-post.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld2-post.ll
@@ -3,7 +3,7 @@
 
 ; i32
 
-define <8 x i32> *@vld2_v4i32(<8 x i32> *%src, <4 x i32> *%dst) {
+define ptr @vld2_v4i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.32 {q0, q1}, [r0]
@@ -12,18 +12,18 @@ define <8 x i32> *@vld2_v4i32(<8 x i32> *%src, <4 x i32> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x i32>, <8 x i32>* %src, align 4
+  %l1 = load <8 x i32>, ptr %src, align 4
   %s1 = shufflevector <8 x i32> %l1, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %s2 = shufflevector <8 x i32> %l1, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %a = add <4 x i32> %s1, %s2
-  store <4 x i32> %a, <4 x i32> *%dst
-  %ret = getelementptr inbounds <8 x i32>, <8 x i32>* %src, i32 1
-  ret <8 x i32> *%ret
+  store <4 x i32> %a, ptr %dst
+  %ret = getelementptr inbounds <8 x i32>, ptr %src, i32 1
+  ret ptr %ret
 }
 
 ; i16
 
-define <16 x i16> *@vld2_v8i16(<16 x i16> *%src, <8 x i16> *%dst) {
+define ptr @vld2_v8i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.16 {q0, q1}, [r0]
@@ -32,18 +32,18 @@ define <16 x i16> *@vld2_v8i16(<16 x i16> *%src, <8 x i16> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x i16>, <16 x i16>* %src, align 4
+  %l1 = load <16 x i16>, ptr %src, align 4
   %s1 = shufflevector <16 x i16> %l1, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %s2 = shufflevector <16 x i16> %l1, <16 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   %a = add <8 x i16> %s1, %s2
-  store <8 x i16> %a, <8 x i16> *%dst
-  %ret = getelementptr inbounds <16 x i16>, <16 x i16>* %src, i32 1
-  ret <16 x i16> *%ret
+  store <8 x i16> %a, ptr %dst
+  %ret = getelementptr inbounds <16 x i16>, ptr %src, i32 1
+  ret ptr %ret
 }
 
 ; i8
 
-define <32 x i8> *@vld2_v16i8(<32 x i8> *%src, <16 x i8> *%dst) {
+define ptr @vld2_v16i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.8 {q0, q1}, [r0]
@@ -52,18 +52,18 @@ define <32 x i8> *@vld2_v16i8(<32 x i8> *%src, <16 x i8> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x i8>, <32 x i8>* %src, align 4
+  %l1 = load <32 x i8>, ptr %src, align 4
   %s1 = shufflevector <32 x i8> %l1, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
   %s2 = shufflevector <32 x i8> %l1, <32 x i8> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %a = add <16 x i8> %s1, %s2
-  store <16 x i8> %a, <16 x i8> *%dst
-  %ret = getelementptr inbounds <32 x i8>, <32 x i8>* %src, i32 1
-  ret <32 x i8> *%ret
+  store <16 x i8> %a, ptr %dst
+  %ret = getelementptr inbounds <32 x i8>, ptr %src, i32 1
+  ret ptr %ret
 }
 
 ; i64
 
-define <4 x i64> *@vld2_v2i64(<4 x i64> *%src, <2 x i64> *%dst) {
+define ptr @vld2_v2i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -83,18 +83,18 @@ define <4 x i64> *@vld2_v2i64(<4 x i64> *%src, <2 x i64> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %l1 = load <4 x i64>, <4 x i64>* %src, align 4
+  %l1 = load <4 x i64>, ptr %src, align 4
   %s1 = shufflevector <4 x i64> %l1, <4 x i64> undef, <2 x i32> <i32 0, i32 2>
   %s2 = shufflevector <4 x i64> %l1, <4 x i64> undef, <2 x i32> <i32 1, i32 3>
   %a = add <2 x i64> %s1, %s2
-  store <2 x i64> %a, <2 x i64> *%dst
-  %ret = getelementptr inbounds <4 x i64>, <4 x i64>* %src, i32 1
-  ret <4 x i64> *%ret
+  store <2 x i64> %a, ptr %dst
+  %ret = getelementptr inbounds <4 x i64>, ptr %src, i32 1
+  ret ptr %ret
 }
 
 ; f32
 
-define <8 x float> *@vld2_v4f32(<8 x float> *%src, <4 x float> *%dst) {
+define ptr @vld2_v4f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.32 {q0, q1}, [r0]
@@ -103,18 +103,18 @@ define <8 x float> *@vld2_v4f32(<8 x float> *%src, <4 x float> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x float>, <8 x float>* %src, align 4
+  %l1 = load <8 x float>, ptr %src, align 4
   %s1 = shufflevector <8 x float> %l1, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %s2 = shufflevector <8 x float> %l1, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %a = fadd <4 x float> %s1, %s2
-  store <4 x float> %a, <4 x float> *%dst
-  %ret = getelementptr inbounds <8 x float>, <8 x float>* %src, i32 1
-  ret <8 x float> *%ret
+  store <4 x float> %a, ptr %dst
+  %ret = getelementptr inbounds <8 x float>, ptr %src, i32 1
+  ret ptr %ret
 }
 
 ; f16
 
-define <16 x half> *@vld2_v8f16(<16 x half> *%src, <8 x half> *%dst) {
+define ptr @vld2_v8f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v8f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.16 {q0, q1}, [r0]
@@ -123,18 +123,18 @@ define <16 x half> *@vld2_v8f16(<16 x half> *%src, <8 x half> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x half>, <16 x half>* %src, align 4
+  %l1 = load <16 x half>, ptr %src, align 4
   %s1 = shufflevector <16 x half> %l1, <16 x half> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %s2 = shufflevector <16 x half> %l1, <16 x half> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   %a = fadd <8 x half> %s1, %s2
-  store <8 x half> %a, <8 x half> *%dst
-  %ret = getelementptr inbounds <16 x half>, <16 x half>* %src, i32 1
-  ret <16 x half> *%ret
+  store <8 x half> %a, ptr %dst
+  %ret = getelementptr inbounds <16 x half>, ptr %src, i32 1
+  ret ptr %ret
 }
 
 ; f64
 
-define <4 x double> *@vld2_v2f64(<4 x double> *%src, <2 x double> *%dst) {
+define ptr @vld2_v2f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v2f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
@@ -144,11 +144,11 @@ define <4 x double> *@vld2_v2f64(<4 x double> *%src, <2 x double> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <4 x double>, <4 x double>* %src, align 4
+  %l1 = load <4 x double>, ptr %src, align 4
   %s1 = shufflevector <4 x double> %l1, <4 x double> undef, <2 x i32> <i32 0, i32 2>
   %s2 = shufflevector <4 x double> %l1, <4 x double> undef, <2 x i32> <i32 1, i32 3>
   %a = fadd <2 x double> %s1, %s2
-  store <2 x double> %a, <2 x double> *%dst
-  %ret = getelementptr inbounds <4 x double>, <4 x double>* %src, i32 1
-  ret <4 x double> *%ret
+  store <2 x double> %a, ptr %dst
+  %ret = getelementptr inbounds <4 x double>, ptr %src, i32 1
+  ret ptr %ret
 }
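
; For reference, the rewrite in this file and the surrounding ones is purely
; mechanical. Below is a minimal standalone sketch (hypothetical functions,
; not part of the committed patch) of the two recurring patterns: element
; pointer bitcasts disappear (renumbering the anonymous %N values), while
; getelementptr keeps its explicit source element type, so offsets are
; unchanged.

define <16 x i8> @sketch_load(ptr %x, i32 %index) {
entry:
  ; Typed-pointer IR needed "%1 = bitcast i8* %0 to <16 x i8>*" here; with
  ; opaque pointers the vector load uses the i8 GEP result directly.
  %0 = getelementptr inbounds i8, ptr %x, i32 %index
  %wide.load = load <16 x i8>, ptr %0, align 1
  ret <16 x i8> %wide.load
}

define ptr @sketch_ret(ptr %src) {
entry:
  ; "<8 x i32>* %src" becomes plain "ptr", but the GEP still names the
  ; element type, so the step of one <8 x i32> (32 bytes) is unchanged.
  %ret = getelementptr inbounds <8 x i32>, ptr %src, i32 1
  ret ptr %ret
}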

diff --git a/llvm/test/CodeGen/Thumb2/mve-vld2.ll b/llvm/test/CodeGen/Thumb2/mve-vld2.ll
index 2198bfa127f52..537fa993d1658 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld2.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld2.ll
@@ -3,7 +3,7 @@
 
 ; i32
 
-define void @vld2_v2i32(<4 x i32> *%src, <2 x i32> *%dst) {
+define void @vld2_v2i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v2i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -17,15 +17,15 @@ define void @vld2_v2i32(<4 x i32> *%src, <2 x i32> *%dst) {
 ; CHECK-NEXT:    strd r2, r0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <4 x i32>, <4 x i32>* %src, align 4
+  %l1 = load <4 x i32>, ptr %src, align 4
   %s1 = shufflevector <4 x i32> %l1, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
   %s2 = shufflevector <4 x i32> %l1, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
   %a = add <2 x i32> %s1, %s2
-  store <2 x i32> %a, <2 x i32> *%dst
+  store <2 x i32> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v4i32(<8 x i32> *%src, <4 x i32> *%dst) {
+define void @vld2_v4i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.32 {q0, q1}, [r0]
@@ -34,15 +34,15 @@ define void @vld2_v4i32(<8 x i32> *%src, <4 x i32> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x i32>, <8 x i32>* %src, align 4
+  %l1 = load <8 x i32>, ptr %src, align 4
   %s1 = shufflevector <8 x i32> %l1, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %s2 = shufflevector <8 x i32> %l1, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %a = add <4 x i32> %s1, %s2
-  store <4 x i32> %a, <4 x i32> *%dst
+  store <4 x i32> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v8i32(<16 x i32> *%src, <8 x i32> *%dst) {
+define void @vld2_v8i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v8i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.32 {q0, q1}, [r0]
@@ -55,15 +55,15 @@ define void @vld2_v8i32(<16 x i32> *%src, <8 x i32> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x i32>, <16 x i32>* %src, align 4
+  %l1 = load <16 x i32>, ptr %src, align 4
   %s1 = shufflevector <16 x i32> %l1, <16 x i32> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %s2 = shufflevector <16 x i32> %l1, <16 x i32> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   %a = add <8 x i32> %s1, %s2
-  store <8 x i32> %a, <8 x i32> *%dst
+  store <8 x i32> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v16i32(<32 x i32> *%src, <16 x i32> *%dst) {
+define void @vld2_v16i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v16i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
@@ -89,15 +89,15 @@ define void @vld2_v16i32(<32 x i32> *%src, <16 x i32> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x i32>, <32 x i32>* %src, align 4
+  %l1 = load <32 x i32>, ptr %src, align 4
   %s1 = shufflevector <32 x i32> %l1, <32 x i32> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
   %s2 = shufflevector <32 x i32> %l1, <32 x i32> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %a = add <16 x i32> %s1, %s2
-  store <16 x i32> %a, <16 x i32> *%dst
+  store <16 x i32> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v4i32_align1(<8 x i32> *%src, <4 x i32> *%dst) {
+define void @vld2_v4i32_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v4i32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0, #16]
@@ -113,17 +113,17 @@ define void @vld2_v4i32_align1(<8 x i32> *%src, <4 x i32> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x i32>, <8 x i32>* %src, align 1
+  %l1 = load <8 x i32>, ptr %src, align 1
   %s1 = shufflevector <8 x i32> %l1, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %s2 = shufflevector <8 x i32> %l1, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %a = add <4 x i32> %s1, %s2
-  store <4 x i32> %a, <4 x i32> *%dst
+  store <4 x i32> %a, ptr %dst
   ret void
 }
 
 ; i16
 
-define void @vld2_v2i16(<4 x i16> *%src, <2 x i16> *%dst) {
+define void @vld2_v2i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v2i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0]
@@ -138,15 +138,15 @@ define void @vld2_v2i16(<4 x i16> *%src, <2 x i16> *%dst) {
 ; CHECK-NEXT:    strh r0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <4 x i16>, <4 x i16>* %src, align 2
+  %l1 = load <4 x i16>, ptr %src, align 2
   %s1 = shufflevector <4 x i16> %l1, <4 x i16> undef, <2 x i32> <i32 0, i32 2>
   %s2 = shufflevector <4 x i16> %l1, <4 x i16> undef, <2 x i32> <i32 1, i32 3>
   %a = add <2 x i16> %s1, %s2
-  store <2 x i16> %a, <2 x i16> *%dst
+  store <2 x i16> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v4i16(<8 x i16> *%src, <4 x i16> *%dst) {
+define void @vld2_v4i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -155,15 +155,15 @@ define void @vld2_v4i16(<8 x i16> *%src, <4 x i16> *%dst) {
 ; CHECK-NEXT:    vstrh.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x i16>, <8 x i16>* %src, align 2
+  %l1 = load <8 x i16>, ptr %src, align 2
   %s1 = shufflevector <8 x i16> %l1, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %s2 = shufflevector <8 x i16> %l1, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %a = add <4 x i16> %s1, %s2
-  store <4 x i16> %a, <4 x i16> *%dst
+  store <4 x i16> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v8i16(<16 x i16> *%src, <8 x i16> *%dst) {
+define void @vld2_v8i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.16 {q0, q1}, [r0]
@@ -172,15 +172,15 @@ define void @vld2_v8i16(<16 x i16> *%src, <8 x i16> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x i16>, <16 x i16>* %src, align 2
+  %l1 = load <16 x i16>, ptr %src, align 2
   %s1 = shufflevector <16 x i16> %l1, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %s2 = shufflevector <16 x i16> %l1, <16 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   %a = add <8 x i16> %s1, %s2
-  store <8 x i16> %a, <8 x i16> *%dst
+  store <8 x i16> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v16i16(<32 x i16> *%src, <16 x i16> *%dst) {
+define void @vld2_v16i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.16 {q0, q1}, [r0]
@@ -193,15 +193,15 @@ define void @vld2_v16i16(<32 x i16> *%src, <16 x i16> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x i16>, <32 x i16>* %src, align 2
+  %l1 = load <32 x i16>, ptr %src, align 2
   %s1 = shufflevector <32 x i16> %l1, <32 x i16> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
   %s2 = shufflevector <32 x i16> %l1, <32 x i16> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %a = add <16 x i16> %s1, %s2
-  store <16 x i16> %a, <16 x i16> *%dst
+  store <16 x i16> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v8i16_align1(<16 x i16> *%src, <8 x i16> *%dst) {
+define void @vld2_v8i16_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v8i16_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -229,17 +229,17 @@ define void @vld2_v8i16_align1(<16 x i16> *%src, <8 x i16> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x i16>, <16 x i16>* %src, align 1
+  %l1 = load <16 x i16>, ptr %src, align 1
   %s1 = shufflevector <16 x i16> %l1, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %s2 = shufflevector <16 x i16> %l1, <16 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   %a = add <8 x i16> %s1, %s2
-  store <8 x i16> %a, <8 x i16> *%dst
+  store <8 x i16> %a, ptr %dst
   ret void
 }
 
 ; i8
 
-define void @vld2_v2i8(<4 x i8> *%src, <2 x i8> *%dst) {
+define void @vld2_v2i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v2i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0]
@@ -254,15 +254,15 @@ define void @vld2_v2i8(<4 x i8> *%src, <2 x i8> *%dst) {
 ; CHECK-NEXT:    strb r0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <4 x i8>, <4 x i8>* %src, align 1
+  %l1 = load <4 x i8>, ptr %src, align 1
   %s1 = shufflevector <4 x i8> %l1, <4 x i8> undef, <2 x i32> <i32 0, i32 2>
   %s2 = shufflevector <4 x i8> %l1, <4 x i8> undef, <2 x i32> <i32 1, i32 3>
   %a = add <2 x i8> %s1, %s2
-  store <2 x i8> %a, <2 x i8> *%dst
+  store <2 x i8> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v4i8(<8 x i8> *%src, <4 x i8> *%dst) {
+define void @vld2_v4i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v4i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0]
@@ -271,15 +271,15 @@ define void @vld2_v4i8(<8 x i8> *%src, <4 x i8> *%dst) {
 ; CHECK-NEXT:    vstrb.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x i8>, <8 x i8>* %src, align 1
+  %l1 = load <8 x i8>, ptr %src, align 1
   %s1 = shufflevector <8 x i8> %l1, <8 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %s2 = shufflevector <8 x i8> %l1, <8 x i8> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %a = add <4 x i8> %s1, %s2
-  store <4 x i8> %a, <4 x i8> *%dst
+  store <4 x i8> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v8i8(<16 x i8> *%src, <8 x i8> *%dst) {
+define void @vld2_v8i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -288,15 +288,15 @@ define void @vld2_v8i8(<16 x i8> *%src, <8 x i8> *%dst) {
 ; CHECK-NEXT:    vstrb.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x i8>, <16 x i8>* %src, align 1
+  %l1 = load <16 x i8>, ptr %src, align 1
   %s1 = shufflevector <16 x i8> %l1, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %s2 = shufflevector <16 x i8> %l1, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   %a = add <8 x i8> %s1, %s2
-  store <8 x i8> %a, <8 x i8> *%dst
+  store <8 x i8> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v16i8(<32 x i8> *%src, <16 x i8> *%dst) {
+define void @vld2_v16i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.8 {q0, q1}, [r0]
@@ -305,17 +305,17 @@ define void @vld2_v16i8(<32 x i8> *%src, <16 x i8> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x i8>, <32 x i8>* %src, align 1
+  %l1 = load <32 x i8>, ptr %src, align 1
   %s1 = shufflevector <32 x i8> %l1, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
   %s2 = shufflevector <32 x i8> %l1, <32 x i8> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %a = add <16 x i8> %s1, %s2
-  store <16 x i8> %a, <16 x i8> *%dst
+  store <16 x i8> %a, ptr %dst
   ret void
 }
 
 ; i64
 
-define void @vld2_v2i64(<4 x i64> *%src, <2 x i64> *%dst) {
+define void @vld2_v2i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -335,15 +335,15 @@ define void @vld2_v2i64(<4 x i64> *%src, <2 x i64> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %l1 = load <4 x i64>, <4 x i64>* %src, align 8
+  %l1 = load <4 x i64>, ptr %src, align 8
   %s1 = shufflevector <4 x i64> %l1, <4 x i64> undef, <2 x i32> <i32 0, i32 2>
   %s2 = shufflevector <4 x i64> %l1, <4 x i64> undef, <2 x i32> <i32 1, i32 3>
   %a = add <2 x i64> %s1, %s2
-  store <2 x i64> %a, <2 x i64> *%dst
+  store <2 x i64> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v4i64(<8 x i64> *%src, <4 x i64> *%dst) {
+define void @vld2_v4i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v4i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
@@ -387,17 +387,17 @@ define void @vld2_v4i64(<8 x i64> *%src, <4 x i64> *%dst) {
 ; CHECK-NEXT:    vpop {d8}
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 entry:
-  %l1 = load <8 x i64>, <8 x i64>* %src, align 8
+  %l1 = load <8 x i64>, ptr %src, align 8
   %s1 = shufflevector <8 x i64> %l1, <8 x i64> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %s2 = shufflevector <8 x i64> %l1, <8 x i64> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %a = add <4 x i64> %s1, %s2
-  store <4 x i64> %a, <4 x i64> *%dst
+  store <4 x i64> %a, ptr %dst
   ret void
 }
 
 ; f32
 
-define void @vld2_v2f32(<4 x float> *%src, <2 x float> *%dst) {
+define void @vld2_v2f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v2f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -408,15 +408,15 @@ define void @vld2_v2f32(<4 x float> *%src, <2 x float> *%dst) {
 ; CHECK-NEXT:    vstmia r1, {s0, s1}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <4 x float>, <4 x float>* %src, align 4
+  %l1 = load <4 x float>, ptr %src, align 4
   %s1 = shufflevector <4 x float> %l1, <4 x float> undef, <2 x i32> <i32 0, i32 2>
   %s2 = shufflevector <4 x float> %l1, <4 x float> undef, <2 x i32> <i32 1, i32 3>
   %a = fadd <2 x float> %s1, %s2
-  store <2 x float> %a, <2 x float> *%dst
+  store <2 x float> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v4f32(<8 x float> *%src, <4 x float> *%dst) {
+define void @vld2_v4f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.32 {q0, q1}, [r0]
@@ -425,15 +425,15 @@ define void @vld2_v4f32(<8 x float> *%src, <4 x float> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x float>, <8 x float>* %src, align 4
+  %l1 = load <8 x float>, ptr %src, align 4
   %s1 = shufflevector <8 x float> %l1, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %s2 = shufflevector <8 x float> %l1, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %a = fadd <4 x float> %s1, %s2
-  store <4 x float> %a, <4 x float> *%dst
+  store <4 x float> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v8f32(<16 x float> *%src, <8 x float> *%dst) {
+define void @vld2_v8f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v8f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.32 {q0, q1}, [r0]
@@ -446,15 +446,15 @@ define void @vld2_v8f32(<16 x float> *%src, <8 x float> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x float>, <16 x float>* %src, align 4
+  %l1 = load <16 x float>, ptr %src, align 4
   %s1 = shufflevector <16 x float> %l1, <16 x float> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %s2 = shufflevector <16 x float> %l1, <16 x float> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   %a = fadd <8 x float> %s1, %s2
-  store <8 x float> %a, <8 x float> *%dst
+  store <8 x float> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v16f32(<32 x float> *%src, <16 x float> *%dst) {
+define void @vld2_v16f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v16f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
@@ -480,15 +480,15 @@ define void @vld2_v16f32(<32 x float> *%src, <16 x float> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x float>, <32 x float>* %src, align 4
+  %l1 = load <32 x float>, ptr %src, align 4
   %s1 = shufflevector <32 x float> %l1, <32 x float> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
   %s2 = shufflevector <32 x float> %l1, <32 x float> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %a = fadd <16 x float> %s1, %s2
-  store <16 x float> %a, <16 x float> *%dst
+  store <16 x float> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v4f32_align1(<8 x float> *%src, <4 x float> *%dst) {
+define void @vld2_v4f32_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v4f32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0, #16]
@@ -504,17 +504,17 @@ define void @vld2_v4f32_align1(<8 x float> *%src, <4 x float> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x float>, <8 x float>* %src, align 1
+  %l1 = load <8 x float>, ptr %src, align 1
   %s1 = shufflevector <8 x float> %l1, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %s2 = shufflevector <8 x float> %l1, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %a = fadd <4 x float> %s1, %s2
-  store <4 x float> %a, <4 x float> *%dst
+  store <4 x float> %a, ptr %dst
   ret void
 }
 
 ; f16
 
-define void @vld2_v2f16(<4 x half> *%src, <2 x half> *%dst) {
+define void @vld2_v2f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v2f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0]
@@ -530,15 +530,15 @@ define void @vld2_v2f16(<4 x half> *%src, <2 x half> *%dst) {
 ; CHECK-NEXT:    str r0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <4 x half>, <4 x half>* %src, align 2
+  %l1 = load <4 x half>, ptr %src, align 2
   %s1 = shufflevector <4 x half> %l1, <4 x half> undef, <2 x i32> <i32 0, i32 2>
   %s2 = shufflevector <4 x half> %l1, <4 x half> undef, <2 x i32> <i32 1, i32 3>
   %a = fadd <2 x half> %s1, %s2
-  store <2 x half> %a, <2 x half> *%dst
+  store <2 x half> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v4f16(<8 x half> *%src, <4 x half> *%dst) {
+define void @vld2_v4f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v4f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -556,15 +556,15 @@ define void @vld2_v4f16(<8 x half> *%src, <4 x half> *%dst) {
 ; CHECK-NEXT:    strd r0, r2, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x half>, <8 x half>* %src, align 2
+  %l1 = load <8 x half>, ptr %src, align 2
   %s1 = shufflevector <8 x half> %l1, <8 x half> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %s2 = shufflevector <8 x half> %l1, <8 x half> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %a = fadd <4 x half> %s1, %s2
-  store <4 x half> %a, <4 x half> *%dst
+  store <4 x half> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v8f16(<16 x half> *%src, <8 x half> *%dst) {
+define void @vld2_v8f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v8f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.16 {q0, q1}, [r0]
@@ -573,15 +573,15 @@ define void @vld2_v8f16(<16 x half> *%src, <8 x half> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x half>, <16 x half>* %src, align 2
+  %l1 = load <16 x half>, ptr %src, align 2
   %s1 = shufflevector <16 x half> %l1, <16 x half> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %s2 = shufflevector <16 x half> %l1, <16 x half> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   %a = fadd <8 x half> %s1, %s2
-  store <8 x half> %a, <8 x half> *%dst
+  store <8 x half> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v16f16(<32 x half> *%src, <16 x half> *%dst) {
+define void @vld2_v16f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v16f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld20.16 {q0, q1}, [r0]
@@ -594,15 +594,15 @@ define void @vld2_v16f16(<32 x half> *%src, <16 x half> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q2, [r1, #16]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x half>, <32 x half>* %src, align 2
+  %l1 = load <32 x half>, ptr %src, align 2
   %s1 = shufflevector <32 x half> %l1, <32 x half> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
   %s2 = shufflevector <32 x half> %l1, <32 x half> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   %a = fadd <16 x half> %s1, %s2
-  store <16 x half> %a, <16 x half> *%dst
+  store <16 x half> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v8f16_align1(<16 x half> *%src, <8 x half> *%dst) {
+define void @vld2_v8f16_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v8f16_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -630,17 +630,17 @@ define void @vld2_v8f16_align1(<16 x half> *%src, <8 x half> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x half>, <16 x half>* %src, align 1
+  %l1 = load <16 x half>, ptr %src, align 1
   %s1 = shufflevector <16 x half> %l1, <16 x half> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %s2 = shufflevector <16 x half> %l1, <16 x half> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   %a = fadd <8 x half> %s1, %s2
-  store <8 x half> %a, <8 x half> *%dst
+  store <8 x half> %a, ptr %dst
   ret void
 }
 
 ; f64
 
-define void @vld2_v2f64(<4 x double> *%src, <2 x double> *%dst) {
+define void @vld2_v2f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v2f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
@@ -650,15 +650,15 @@ define void @vld2_v2f64(<4 x double> *%src, <2 x double> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <4 x double>, <4 x double>* %src, align 8
+  %l1 = load <4 x double>, ptr %src, align 8
   %s1 = shufflevector <4 x double> %l1, <4 x double> undef, <2 x i32> <i32 0, i32 2>
   %s2 = shufflevector <4 x double> %l1, <4 x double> undef, <2 x i32> <i32 1, i32 3>
   %a = fadd <2 x double> %s1, %s2
-  store <2 x double> %a, <2 x double> *%dst
+  store <2 x double> %a, ptr %dst
   ret void
 }
 
-define void @vld2_v4f64(<8 x double> *%src, <4 x double> *%dst) {
+define void @vld2_v4f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld2_v4f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #48]
@@ -673,10 +673,10 @@ define void @vld2_v4f64(<8 x double> *%src, <4 x double> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q1, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x double>, <8 x double>* %src, align 8
+  %l1 = load <8 x double>, ptr %src, align 8
   %s1 = shufflevector <8 x double> %l1, <8 x double> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %s2 = shufflevector <8 x double> %l1, <8 x double> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   %a = fadd <4 x double> %s1, %s2
-  store <4 x double> %a, <4 x double> *%dst
+  store <4 x double> %a, ptr %dst
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
index 888053d994f4f..ec74cba3d979e 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
@@ -3,7 +3,7 @@
 
 ; i32
 
-define void @vld3_v2i32(<6 x i32> *%src, <2 x i32> *%dst) {
+define void @vld3_v2i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v2i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -21,17 +21,17 @@ define void @vld3_v2i32(<6 x i32> *%src, <2 x i32> *%dst) {
 ; CHECK-NEXT:    strd r2, r0, [r1]
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %l1 = load <6 x i32>, <6 x i32>* %src, align 4
+  %l1 = load <6 x i32>, ptr %src, align 4
   %s1 = shufflevector <6 x i32> %l1, <6 x i32> undef, <2 x i32> <i32 0, i32 3>
   %s2 = shufflevector <6 x i32> %l1, <6 x i32> undef, <2 x i32> <i32 1, i32 4>
   %s3 = shufflevector <6 x i32> %l1, <6 x i32> undef, <2 x i32> <i32 2, i32 5>
   %a1 = add <2 x i32> %s1, %s2
   %a = add <2 x i32> %a1, %s3
-  store <2 x i32> %a, <2 x i32> *%dst
+  store <2 x i32> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v4i32(<12 x i32> *%src, <4 x i32> *%dst) {
+define void @vld3_v4i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -56,17 +56,17 @@ define void @vld3_v4i32(<12 x i32> *%src, <4 x i32> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <12 x i32>, <12 x i32>* %src, align 4
+  %l1 = load <12 x i32>, ptr %src, align 4
   %s1 = shufflevector <12 x i32> %l1, <12 x i32> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
   %s2 = shufflevector <12 x i32> %l1, <12 x i32> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
   %s3 = shufflevector <12 x i32> %l1, <12 x i32> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
   %a1 = add <4 x i32> %s1, %s2
   %a = add <4 x i32> %a1, %s3
-  store <4 x i32> %a, <4 x i32> *%dst
+  store <4 x i32> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v8i32(<24 x i32> *%src, <8 x i32> *%dst) {
+define void @vld3_v8i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v8i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -108,17 +108,17 @@ define void @vld3_v8i32(<24 x i32> *%src, <8 x i32> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <24 x i32>, <24 x i32>* %src, align 4
+  %l1 = load <24 x i32>, ptr %src, align 4
   %s1 = shufflevector <24 x i32> %l1, <24 x i32> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
   %s2 = shufflevector <24 x i32> %l1, <24 x i32> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
   %s3 = shufflevector <24 x i32> %l1, <24 x i32> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
   %a1 = add <8 x i32> %s1, %s2
   %a = add <8 x i32> %a1, %s3
-  store <8 x i32> %a, <8 x i32> *%dst
+  store <8 x i32> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v16i32(<48 x i32> *%src, <16 x i32> *%dst) {
+define void @vld3_v16i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v16i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -194,19 +194,19 @@ define void @vld3_v16i32(<48 x i32> *%src, <16 x i32> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <48 x i32>, <48 x i32>* %src, align 4
+  %l1 = load <48 x i32>, ptr %src, align 4
   %s1 = shufflevector <48 x i32> %l1, <48 x i32> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
   %s2 = shufflevector <48 x i32> %l1, <48 x i32> undef, <16 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46>
   %s3 = shufflevector <48 x i32> %l1, <48 x i32> undef, <16 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47>
   %a1 = add <16 x i32> %s1, %s2
   %a = add <16 x i32> %a1, %s3
-  store <16 x i32> %a, <16 x i32> *%dst
+  store <16 x i32> %a, ptr %dst
   ret void
 }
 
 ; i16
 
-define void @vld3_v2i16(<6 x i16> *%src, <2 x i16> *%dst) {
+define void @vld3_v2i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v2i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #8
@@ -236,17 +236,17 @@ define void @vld3_v2i16(<6 x i16> *%src, <2 x i16> *%dst) {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <6 x i16>, <6 x i16>* %src, align 4
+  %l1 = load <6 x i16>, ptr %src, align 4
   %s1 = shufflevector <6 x i16> %l1, <6 x i16> undef, <2 x i32> <i32 0, i32 3>
   %s2 = shufflevector <6 x i16> %l1, <6 x i16> undef, <2 x i32> <i32 1, i32 4>
   %s3 = shufflevector <6 x i16> %l1, <6 x i16> undef, <2 x i32> <i32 2, i32 5>
   %a1 = add <2 x i16> %s1, %s2
   %a = add <2 x i16> %a1, %s3
-  store <2 x i16> %a, <2 x i16> *%dst
+  store <2 x i16> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v4i16(<12 x i16> *%src, <4 x i16> *%dst) {
+define void @vld3_v4i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -274,17 +274,17 @@ define void @vld3_v4i16(<12 x i16> *%src, <4 x i16> *%dst) {
 ; CHECK-NEXT:    vstrh.32 q0, [r1]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %l1 = load <12 x i16>, <12 x i16>* %src, align 4
+  %l1 = load <12 x i16>, ptr %src, align 4
   %s1 = shufflevector <12 x i16> %l1, <12 x i16> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
   %s2 = shufflevector <12 x i16> %l1, <12 x i16> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
   %s3 = shufflevector <12 x i16> %l1, <12 x i16> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
   %a1 = add <4 x i16> %s1, %s2
   %a = add <4 x i16> %a1, %s3
-  store <4 x i16> %a, <4 x i16> *%dst
+  store <4 x i16> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v8i16(<24 x i16> *%src, <8 x i16> *%dst) {
+define void @vld3_v8i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -329,17 +329,17 @@ define void @vld3_v8i16(<24 x i16> *%src, <8 x i16> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <24 x i16>, <24 x i16>* %src, align 4
+  %l1 = load <24 x i16>, ptr %src, align 4
   %s1 = shufflevector <24 x i16> %l1, <24 x i16> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
   %s2 = shufflevector <24 x i16> %l1, <24 x i16> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
   %s3 = shufflevector <24 x i16> %l1, <24 x i16> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
   %a1 = add <8 x i16> %s1, %s2
   %a = add <8 x i16> %a1, %s3
-  store <8 x i16> %a, <8 x i16> *%dst
+  store <8 x i16> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v16i16(<48 x i16> *%src, <16 x i16> *%dst) {
+define void @vld3_v16i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -421,19 +421,19 @@ define void @vld3_v16i16(<48 x i16> *%src, <16 x i16> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <48 x i16>, <48 x i16>* %src, align 4
+  %l1 = load <48 x i16>, ptr %src, align 4
   %s1 = shufflevector <48 x i16> %l1, <48 x i16> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
   %s2 = shufflevector <48 x i16> %l1, <48 x i16> undef, <16 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46>
   %s3 = shufflevector <48 x i16> %l1, <48 x i16> undef, <16 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47>
   %a1 = add <16 x i16> %s1, %s2
   %a = add <16 x i16> %a1, %s3
-  store <16 x i16> %a, <16 x i16> *%dst
+  store <16 x i16> %a, ptr %dst
   ret void
 }
 
 ; i8
 
-define void @vld3_v2i8(<6 x i8> *%src, <2 x i8> *%dst) {
+define void @vld3_v2i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v2i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #8
@@ -457,17 +457,17 @@ define void @vld3_v2i8(<6 x i8> *%src, <2 x i8> *%dst) {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <6 x i8>, <6 x i8>* %src, align 4
+  %l1 = load <6 x i8>, ptr %src, align 4
   %s1 = shufflevector <6 x i8> %l1, <6 x i8> undef, <2 x i32> <i32 0, i32 3>
   %s2 = shufflevector <6 x i8> %l1, <6 x i8> undef, <2 x i32> <i32 1, i32 4>
   %s3 = shufflevector <6 x i8> %l1, <6 x i8> undef, <2 x i32> <i32 2, i32 5>
   %a1 = add <2 x i8> %s1, %s2
   %a = add <2 x i8> %a1, %s3
-  store <2 x i8> %a, <2 x i8> *%dst
+  store <2 x i8> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v4i8(<12 x i8> *%src, <4 x i8> *%dst) {
+define void @vld3_v4i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v4i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -503,17 +503,17 @@ define void @vld3_v4i8(<12 x i8> *%src, <4 x i8> *%dst) {
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
-  %l1 = load <12 x i8>, <12 x i8>* %src, align 4
+  %l1 = load <12 x i8>, ptr %src, align 4
   %s1 = shufflevector <12 x i8> %l1, <12 x i8> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
   %s2 = shufflevector <12 x i8> %l1, <12 x i8> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
   %s3 = shufflevector <12 x i8> %l1, <12 x i8> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
   %a1 = add <4 x i8> %s1, %s2
   %a = add <4 x i8> %a1, %s3
-  store <4 x i8> %a, <4 x i8> *%dst
+  store <4 x i8> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v8i8(<24 x i8> *%src, <8 x i8> *%dst) {
+define void @vld3_v8i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -571,17 +571,17 @@ define void @vld3_v8i8(<24 x i8> *%src, <8 x i8> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <24 x i8>, <24 x i8>* %src, align 4
+  %l1 = load <24 x i8>, ptr %src, align 4
   %s1 = shufflevector <24 x i8> %l1, <24 x i8> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
   %s2 = shufflevector <24 x i8> %l1, <24 x i8> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
   %s3 = shufflevector <24 x i8> %l1, <24 x i8> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
   %a1 = add <8 x i8> %s1, %s2
   %a = add <8 x i8> %a1, %s3
-  store <8 x i8> %a, <8 x i8> *%dst
+  store <8 x i8> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v16i8(<48 x i8> *%src, <16 x i8> *%dst) {
+define void @vld3_v16i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -697,19 +697,19 @@ define void @vld3_v16i8(<48 x i8> *%src, <16 x i8> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <48 x i8>, <48 x i8>* %src, align 4
+  %l1 = load <48 x i8>, ptr %src, align 4
   %s1 = shufflevector <48 x i8> %l1, <48 x i8> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
   %s2 = shufflevector <48 x i8> %l1, <48 x i8> undef, <16 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46>
   %s3 = shufflevector <48 x i8> %l1, <48 x i8> undef, <16 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47>
   %a1 = add <16 x i8> %s1, %s2
   %a = add <16 x i8> %a1, %s3
-  store <16 x i8> %a, <16 x i8> *%dst
+  store <16 x i8> %a, ptr %dst
   ret void
 }
 
 ; i64
 
-define void @vld3_v2i64(<6 x i64> *%src, <2 x i64> *%dst) {
+define void @vld3_v2i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
@@ -740,17 +740,17 @@ define void @vld3_v2i64(<6 x i64> *%src, <2 x i64> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 entry:
-  %l1 = load <6 x i64>, <6 x i64>* %src, align 4
+  %l1 = load <6 x i64>, ptr %src, align 4
   %s1 = shufflevector <6 x i64> %l1, <6 x i64> undef, <2 x i32> <i32 0, i32 3>
   %s2 = shufflevector <6 x i64> %l1, <6 x i64> undef, <2 x i32> <i32 1, i32 4>
   %s3 = shufflevector <6 x i64> %l1, <6 x i64> undef, <2 x i32> <i32 2, i32 5>
   %a1 = add <2 x i64> %s1, %s2
   %a = add <2 x i64> %a1, %s3
-  store <2 x i64> %a, <2 x i64> *%dst
+  store <2 x i64> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v4i64(<12 x i64> *%src, <4 x i64> *%dst) {
+define void @vld3_v4i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v4i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
@@ -808,19 +808,19 @@ define void @vld3_v4i64(<12 x i64> *%src, <4 x i64> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12}
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 entry:
-  %l1 = load <12 x i64>, <12 x i64>* %src, align 4
+  %l1 = load <12 x i64>, ptr %src, align 4
   %s1 = shufflevector <12 x i64> %l1, <12 x i64> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
   %s2 = shufflevector <12 x i64> %l1, <12 x i64> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
   %s3 = shufflevector <12 x i64> %l1, <12 x i64> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
   %a1 = add <4 x i64> %s1, %s2
   %a = add <4 x i64> %a1, %s3
-  store <4 x i64> %a, <4 x i64> *%dst
+  store <4 x i64> %a, ptr %dst
   ret void
 }
 
 ; f32
 
-define void @vld3_v2f32(<6 x float> *%src, <2 x float> *%dst) {
+define void @vld3_v2f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v2f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q2, [r0]
@@ -835,17 +835,17 @@ define void @vld3_v2f32(<6 x float> *%src, <2 x float> *%dst) {
 ; CHECK-NEXT:    vstmia r1, {s0, s1}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <6 x float>, <6 x float>* %src, align 4
+  %l1 = load <6 x float>, ptr %src, align 4
   %s1 = shufflevector <6 x float> %l1, <6 x float> undef, <2 x i32> <i32 0, i32 3>
   %s2 = shufflevector <6 x float> %l1, <6 x float> undef, <2 x i32> <i32 1, i32 4>
   %s3 = shufflevector <6 x float> %l1, <6 x float> undef, <2 x i32> <i32 2, i32 5>
   %a1 = fadd <2 x float> %s1, %s2
   %a = fadd <2 x float> %a1, %s3
-  store <2 x float> %a, <2 x float> *%dst
+  store <2 x float> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v4f32(<12 x float> *%src, <4 x float> *%dst) {
+define void @vld3_v4f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -870,17 +870,17 @@ define void @vld3_v4f32(<12 x float> *%src, <4 x float> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <12 x float>, <12 x float>* %src, align 4
+  %l1 = load <12 x float>, ptr %src, align 4
   %s1 = shufflevector <12 x float> %l1, <12 x float> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
   %s2 = shufflevector <12 x float> %l1, <12 x float> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
   %s3 = shufflevector <12 x float> %l1, <12 x float> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
   %a1 = fadd <4 x float> %s1, %s2
   %a = fadd <4 x float> %a1, %s3
-  store <4 x float> %a, <4 x float> *%dst
+  store <4 x float> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v8f32(<24 x float> *%src, <8 x float> *%dst) {
+define void @vld3_v8f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v8f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -922,17 +922,17 @@ define void @vld3_v8f32(<24 x float> *%src, <8 x float> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <24 x float>, <24 x float>* %src, align 4
+  %l1 = load <24 x float>, ptr %src, align 4
   %s1 = shufflevector <24 x float> %l1, <24 x float> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
   %s2 = shufflevector <24 x float> %l1, <24 x float> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
   %s3 = shufflevector <24 x float> %l1, <24 x float> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
   %a1 = fadd <8 x float> %s1, %s2
   %a = fadd <8 x float> %a1, %s3
-  store <8 x float> %a, <8 x float> *%dst
+  store <8 x float> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v16f32(<48 x float> *%src, <16 x float> *%dst) {
+define void @vld3_v16f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v16f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -1008,19 +1008,19 @@ define void @vld3_v16f32(<48 x float> *%src, <16 x float> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <48 x float>, <48 x float>* %src, align 4
+  %l1 = load <48 x float>, ptr %src, align 4
   %s1 = shufflevector <48 x float> %l1, <48 x float> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
   %s2 = shufflevector <48 x float> %l1, <48 x float> undef, <16 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46>
   %s3 = shufflevector <48 x float> %l1, <48 x float> undef, <16 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47>
   %a1 = fadd <16 x float> %s1, %s2
   %a = fadd <16 x float> %a1, %s3
-  store <16 x float> %a, <16 x float> *%dst
+  store <16 x float> %a, ptr %dst
   ret void
 }
 
 ; f16
 
-define void @vld3_v2f16(<6 x half> *%src, <2 x half> *%dst) {
+define void @vld3_v2f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v2f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrd r2, r3, [r0]
@@ -1040,17 +1040,17 @@ define void @vld3_v2f16(<6 x half> *%src, <2 x half> *%dst) {
 ; CHECK-NEXT:    str r0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <6 x half>, <6 x half>* %src, align 4
+  %l1 = load <6 x half>, ptr %src, align 4
   %s1 = shufflevector <6 x half> %l1, <6 x half> undef, <2 x i32> <i32 0, i32 3>
   %s2 = shufflevector <6 x half> %l1, <6 x half> undef, <2 x i32> <i32 1, i32 4>
   %s3 = shufflevector <6 x half> %l1, <6 x half> undef, <2 x i32> <i32 2, i32 5>
   %a1 = fadd <2 x half> %s1, %s2
   %a = fadd <2 x half> %a1, %s3
-  store <2 x half> %a, <2 x half> *%dst
+  store <2 x half> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v4f16(<12 x half> *%src, <4 x half> *%dst) {
+define void @vld3_v4f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v4f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrd r2, r3, [r0, #16]
@@ -1078,17 +1078,17 @@ define void @vld3_v4f16(<12 x half> *%src, <4 x half> *%dst) {
 ; CHECK-NEXT:    strd r0, r2, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <12 x half>, <12 x half>* %src, align 4
+  %l1 = load <12 x half>, ptr %src, align 4
   %s1 = shufflevector <12 x half> %l1, <12 x half> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
   %s2 = shufflevector <12 x half> %l1, <12 x half> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
   %s3 = shufflevector <12 x half> %l1, <12 x half> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
   %a1 = fadd <4 x half> %s1, %s2
   %a = fadd <4 x half> %a1, %s3
-  store <4 x half> %a, <4 x half> *%dst
+  store <4 x half> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v8f16(<24 x half> *%src, <8 x half> *%dst) {
+define void @vld3_v8f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v8f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -1133,17 +1133,17 @@ define void @vld3_v8f16(<24 x half> *%src, <8 x half> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <24 x half>, <24 x half>* %src, align 4
+  %l1 = load <24 x half>, ptr %src, align 4
   %s1 = shufflevector <24 x half> %l1, <24 x half> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
   %s2 = shufflevector <24 x half> %l1, <24 x half> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
   %s3 = shufflevector <24 x half> %l1, <24 x half> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
   %a1 = fadd <8 x half> %s1, %s2
   %a = fadd <8 x half> %a1, %s3
-  store <8 x half> %a, <8 x half> *%dst
+  store <8 x half> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v16f16(<48 x half> *%src, <16 x half> *%dst) {
+define void @vld3_v16f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v16f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -1225,19 +1225,19 @@ define void @vld3_v16f16(<48 x half> *%src, <16 x half> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <48 x half>, <48 x half>* %src, align 4
+  %l1 = load <48 x half>, ptr %src, align 4
   %s1 = shufflevector <48 x half> %l1, <48 x half> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
   %s2 = shufflevector <48 x half> %l1, <48 x half> undef, <16 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46>
   %s3 = shufflevector <48 x half> %l1, <48 x half> undef, <16 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47>
   %a1 = fadd <16 x half> %s1, %s2
   %a = fadd <16 x half> %a1, %s3
-  store <16 x half> %a, <16 x half> *%dst
+  store <16 x half> %a, ptr %dst
   ret void
 }
 
 ; f64
 
-define void @vld3_v2f64(<6 x double> *%src, <2 x double> *%dst) {
+define void @vld3_v2f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v2f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #32]
@@ -1250,17 +1250,17 @@ define void @vld3_v2f64(<6 x double> *%src, <2 x double> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <6 x double>, <6 x double>* %src, align 4
+  %l1 = load <6 x double>, ptr %src, align 4
   %s1 = shufflevector <6 x double> %l1, <6 x double> undef, <2 x i32> <i32 0, i32 3>
   %s2 = shufflevector <6 x double> %l1, <6 x double> undef, <2 x i32> <i32 1, i32 4>
   %s3 = shufflevector <6 x double> %l1, <6 x double> undef, <2 x i32> <i32 2, i32 5>
   %a1 = fadd <2 x double> %s1, %s2
   %a = fadd <2 x double> %a1, %s3
-  store <2 x double> %a, <2 x double> *%dst
+  store <2 x double> %a, ptr %dst
   ret void
 }
 
-define void @vld3_v4f64(<12 x double> *%src, <4 x double> *%dst) {
+define void @vld3_v4f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld3_v4f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -1284,12 +1284,12 @@ define void @vld3_v4f64(<12 x double> *%src, <4 x double> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <12 x double>, <12 x double>* %src, align 4
+  %l1 = load <12 x double>, ptr %src, align 4
   %s1 = shufflevector <12 x double> %l1, <12 x double> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
   %s2 = shufflevector <12 x double> %l1, <12 x double> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
   %s3 = shufflevector <12 x double> %l1, <12 x double> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
   %a1 = fadd <4 x double> %s1, %s2
   %a = fadd <4 x double> %a1, %s3
-  store <4 x double> %a, <4 x double> *%dst
+  store <4 x double> %a, ptr %dst
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-vld4-post.ll b/llvm/test/CodeGen/Thumb2/mve-vld4-post.ll
index 8ddfb5fb44878..1adc1269feab5 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld4-post.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld4-post.ll
@@ -3,7 +3,7 @@
 
 ; i32
 
-define <16 x i32> *@vld4_v4i32(<16 x i32> *%src, <4 x i32> *%dst) {
+define ptr @vld4_v4i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld40.32 {q0, q1, q2, q3}, [r0]
@@ -16,7 +16,7 @@ define <16 x i32> *@vld4_v4i32(<16 x i32> *%src, <4 x i32> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x i32>, <16 x i32>* %src, align 4
+  %l1 = load <16 x i32>, ptr %src, align 4
   %s1 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %s2 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
   %s3 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
@@ -24,14 +24,14 @@ entry:
   %a1 = add <4 x i32> %s1, %s2
   %a2 = add <4 x i32> %s3, %s4
   %a3 = add <4 x i32> %a1, %a2
-  store <4 x i32> %a3, <4 x i32> *%dst
-  %ret = getelementptr inbounds <16 x i32>, <16 x i32>* %src, i32 1
-  ret <16 x i32> *%ret
+  store <4 x i32> %a3, ptr %dst
+  %ret = getelementptr inbounds <16 x i32>, ptr %src, i32 1
+  ret ptr %ret
 }
 
 ; i16
 
-define <32 x i16> *@vld4_v8i16(<32 x i16> *%src, <8 x i16> *%dst) {
+define ptr @vld4_v8i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld40.16 {q0, q1, q2, q3}, [r0]
@@ -44,7 +44,7 @@ define <32 x i16> *@vld4_v8i16(<32 x i16> *%src, <8 x i16> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x i16>, <32 x i16>* %src, align 4
+  %l1 = load <32 x i16>, ptr %src, align 4
   %s1 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
   %s2 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
   %s3 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
@@ -52,14 +52,14 @@ entry:
   %a1 = add <8 x i16> %s1, %s2
   %a2 = add <8 x i16> %s3, %s4
   %a3 = add <8 x i16> %a1, %a2
-  store <8 x i16> %a3, <8 x i16> *%dst
-  %ret = getelementptr inbounds <32 x i16>, <32 x i16>* %src, i32 1
-  ret <32 x i16> *%ret
+  store <8 x i16> %a3, ptr %dst
+  %ret = getelementptr inbounds <32 x i16>, ptr %src, i32 1
+  ret ptr %ret
 }
 
 ; i8
 
-define <64 x i8> *@vld4_v16i8(<64 x i8> *%src, <16 x i8> *%dst) {
+define ptr @vld4_v16i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld40.8 {q0, q1, q2, q3}, [r0]
@@ -72,7 +72,7 @@ define <64 x i8> *@vld4_v16i8(<64 x i8> *%src, <16 x i8> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <64 x i8>, <64 x i8>* %src, align 4
+  %l1 = load <64 x i8>, ptr %src, align 4
   %s1 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
   %s2 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
   %s3 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
@@ -80,14 +80,14 @@ entry:
   %a1 = add <16 x i8> %s1, %s2
   %a2 = add <16 x i8> %s3, %s4
   %a3 = add <16 x i8> %a1, %a2
-  store <16 x i8> %a3, <16 x i8> *%dst
-  %ret = getelementptr inbounds <64 x i8>, <64 x i8>* %src, i32 1
-  ret <64 x i8> *%ret
+  store <16 x i8> %a3, ptr %dst
+  %ret = getelementptr inbounds <64 x i8>, ptr %src, i32 1
+  ret ptr %ret
 }
 
 ; i64
 
-define <8 x i64> *@vld4_v2i64(<8 x i64> *%src, <2 x i64> *%dst) {
+define ptr @vld4_v2i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
@@ -132,7 +132,7 @@ define <8 x i64> *@vld4_v2i64(<8 x i64> *%src, <2 x i64> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 entry:
-  %l1 = load <8 x i64>, <8 x i64>* %src, align 4
+  %l1 = load <8 x i64>, ptr %src, align 4
   %s1 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 0, i32 4>
   %s2 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 1, i32 5>
   %s3 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 2, i32 6>
@@ -140,14 +140,14 @@ entry:
   %a1 = add <2 x i64> %s1, %s2
   %a2 = add <2 x i64> %s3, %s4
   %a3 = add <2 x i64> %a1, %a2
-  store <2 x i64> %a3, <2 x i64> *%dst
-  %ret = getelementptr inbounds <8 x i64>, <8 x i64>* %src, i32 1
-  ret <8 x i64> *%ret
+  store <2 x i64> %a3, ptr %dst
+  %ret = getelementptr inbounds <8 x i64>, ptr %src, i32 1
+  ret ptr %ret
 }
 
 ; f32
 
-define <16 x float> *@vld4_v4f32(<16 x float> *%src, <4 x float> *%dst) {
+define ptr @vld4_v4f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld40.32 {q0, q1, q2, q3}, [r0]
@@ -160,7 +160,7 @@ define <16 x float> *@vld4_v4f32(<16 x float> *%src, <4 x float> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x float>, <16 x float>* %src, align 4
+  %l1 = load <16 x float>, ptr %src, align 4
   %s1 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %s2 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
   %s3 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
@@ -168,14 +168,14 @@ entry:
   %a1 = fadd <4 x float> %s1, %s2
   %a2 = fadd <4 x float> %s3, %s4
   %a3 = fadd <4 x float> %a1, %a2
-  store <4 x float> %a3, <4 x float> *%dst
-  %ret = getelementptr inbounds <16 x float>, <16 x float>* %src, i32 1
-  ret <16 x float> *%ret
+  store <4 x float> %a3, ptr %dst
+  %ret = getelementptr inbounds <16 x float>, ptr %src, i32 1
+  ret ptr %ret
 }
 
 ; f16
 
-define <32 x half> *@vld4_v8f16(<32 x half> *%src, <8 x half> *%dst) {
+define ptr @vld4_v8f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v8f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld40.16 {q0, q1, q2, q3}, [r0]
@@ -188,7 +188,7 @@ define <32 x half> *@vld4_v8f16(<32 x half> *%src, <8 x half> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x half>, <32 x half>* %src, align 4
+  %l1 = load <32 x half>, ptr %src, align 4
   %s1 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
   %s2 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
   %s3 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
@@ -196,14 +196,14 @@ entry:
   %a1 = fadd <8 x half> %s1, %s2
   %a2 = fadd <8 x half> %s3, %s4
   %a3 = fadd <8 x half> %a1, %a2
-  store <8 x half> %a3, <8 x half> *%dst
-  %ret = getelementptr inbounds <32 x half>, <32 x half>* %src, i32 1
-  ret <32 x half> *%ret
+  store <8 x half> %a3, ptr %dst
+  %ret = getelementptr inbounds <32 x half>, ptr %src, i32 1
+  ret ptr %ret
 }
 
 ; f64
 
-define <8 x double> *@vld4_v2f64(<8 x double> *%src, <2 x double> *%dst) {
+define ptr @vld4_v2f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v2f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #48]
@@ -219,7 +219,7 @@ define <8 x double> *@vld4_v2f64(<8 x double> *%src, <2 x double> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x double>, <8 x double>* %src, align 4
+  %l1 = load <8 x double>, ptr %src, align 4
   %s1 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 0, i32 4>
   %s2 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 1, i32 5>
   %s3 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 2, i32 6>
@@ -227,7 +227,7 @@ entry:
   %a1 = fadd <2 x double> %s1, %s2
   %a2 = fadd <2 x double> %s3, %s4
   %a3 = fadd <2 x double> %a1, %a2
-  store <2 x double> %a3, <2 x double> *%dst
-  %ret = getelementptr inbounds <8 x double>, <8 x double>* %src, i32 1
-  ret <8 x double> *%ret
+  store <2 x double> %a3, ptr %dst
+  %ret = getelementptr inbounds <8 x double>, ptr %src, i32 1
+  ret ptr %ret
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-vld4.ll b/llvm/test/CodeGen/Thumb2/mve-vld4.ll
index 5058013576343..f8f3d16eddc5e 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld4.ll
@@ -3,7 +3,7 @@
 
 ; i32
 
-define void @vld4_v2i32(<8 x i32> *%src, <2 x i32> *%dst) {
+define void @vld4_v2i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v2i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
@@ -29,7 +29,7 @@ define void @vld4_v2i32(<8 x i32> *%src, <2 x i32> *%dst) {
 ; CHECK-NEXT:    strd r0, r12, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x i32>, <8 x i32>* %src, align 4
+  %l1 = load <8 x i32>, ptr %src, align 4
   %s1 = shufflevector <8 x i32> %l1, <8 x i32> undef, <2 x i32> <i32 0, i32 4>
   %s2 = shufflevector <8 x i32> %l1, <8 x i32> undef, <2 x i32> <i32 1, i32 5>
   %s3 = shufflevector <8 x i32> %l1, <8 x i32> undef, <2 x i32> <i32 2, i32 6>
@@ -37,11 +37,11 @@ entry:
   %a1 = add <2 x i32> %s1, %s2
   %a2 = add <2 x i32> %s3, %s4
   %a3 = add <2 x i32> %a1, %a2
-  store <2 x i32> %a3, <2 x i32> *%dst
+  store <2 x i32> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v4i32(<16 x i32> *%src, <4 x i32> *%dst) {
+define void @vld4_v4i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld40.32 {q0, q1, q2, q3}, [r0]
@@ -54,7 +54,7 @@ define void @vld4_v4i32(<16 x i32> *%src, <4 x i32> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x i32>, <16 x i32>* %src, align 4
+  %l1 = load <16 x i32>, ptr %src, align 4
   %s1 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %s2 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
   %s3 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
@@ -62,11 +62,11 @@ entry:
   %a1 = add <4 x i32> %s1, %s2
   %a2 = add <4 x i32> %s3, %s4
   %a3 = add <4 x i32> %a1, %a2
-  store <4 x i32> %a3, <4 x i32> *%dst
+  store <4 x i32> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v8i32(<32 x i32> *%src, <8 x i32> *%dst) {
+define void @vld4_v8i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v8i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -90,7 +90,7 @@ define void @vld4_v8i32(<32 x i32> *%src, <8 x i32> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x i32>, <32 x i32>* %src, align 4
+  %l1 = load <32 x i32>, ptr %src, align 4
   %s1 = shufflevector <32 x i32> %l1, <32 x i32> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
   %s2 = shufflevector <32 x i32> %l1, <32 x i32> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
   %s3 = shufflevector <32 x i32> %l1, <32 x i32> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
@@ -98,11 +98,11 @@ entry:
   %a1 = add <8 x i32> %s1, %s2
   %a2 = add <8 x i32> %s3, %s4
   %a3 = add <8 x i32> %a1, %a2
-  store <8 x i32> %a3, <8 x i32> *%dst
+  store <8 x i32> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v16i32(<64 x i32> *%src, <16 x i32> *%dst) {
+define void @vld4_v16i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v16i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -152,7 +152,7 @@ define void @vld4_v16i32(<64 x i32> *%src, <16 x i32> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <64 x i32>, <64 x i32>* %src, align 4
+  %l1 = load <64 x i32>, ptr %src, align 4
   %s1 = shufflevector <64 x i32> %l1, <64 x i32> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
   %s2 = shufflevector <64 x i32> %l1, <64 x i32> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
   %s3 = shufflevector <64 x i32> %l1, <64 x i32> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
@@ -160,11 +160,11 @@ entry:
   %a1 = add <16 x i32> %s1, %s2
   %a2 = add <16 x i32> %s3, %s4
   %a3 = add <16 x i32> %a1, %a2
-  store <16 x i32> %a3, <16 x i32> *%dst
+  store <16 x i32> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v4i32_align1(<16 x i32> *%src, <4 x i32> *%dst) {
+define void @vld4_v4i32_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v4i32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -195,7 +195,7 @@ define void @vld4_v4i32_align1(<16 x i32> *%src, <4 x i32> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x i32>, <16 x i32>* %src, align 1
+  %l1 = load <16 x i32>, ptr %src, align 1
   %s1 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %s2 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
   %s3 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
@@ -203,13 +203,13 @@ entry:
   %a1 = add <4 x i32> %s1, %s2
   %a2 = add <4 x i32> %s3, %s4
   %a3 = add <4 x i32> %a1, %a2
-  store <4 x i32> %a3, <4 x i32> *%dst
+  store <4 x i32> %a3, ptr %dst
   ret void
 }
 
 ; i16
 
-define void @vld4_v2i16(<8 x i16> *%src, <2 x i16> *%dst) {
+define void @vld4_v2i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v2i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -231,7 +231,7 @@ define void @vld4_v2i16(<8 x i16> *%src, <2 x i16> *%dst) {
 ; CHECK-NEXT:    strh r0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x i16>, <8 x i16>* %src, align 2
+  %l1 = load <8 x i16>, ptr %src, align 2
   %s1 = shufflevector <8 x i16> %l1, <8 x i16> undef, <2 x i32> <i32 0, i32 4>
   %s2 = shufflevector <8 x i16> %l1, <8 x i16> undef, <2 x i32> <i32 1, i32 5>
   %s3 = shufflevector <8 x i16> %l1, <8 x i16> undef, <2 x i32> <i32 2, i32 6>
@@ -239,11 +239,11 @@ entry:
   %a1 = add <2 x i16> %s1, %s2
   %a2 = add <2 x i16> %s3, %s4
   %a3 = add <2 x i16> %a1, %a2
-  store <2 x i16> %a3, <2 x i16> *%dst
+  store <2 x i16> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v4i16(<16 x i16> *%src, <4 x i16> *%dst) {
+define void @vld4_v4i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -281,7 +281,7 @@ define void @vld4_v4i16(<16 x i16> *%src, <4 x i16> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x i16>, <16 x i16>* %src, align 2
+  %l1 = load <16 x i16>, ptr %src, align 2
   %s1 = shufflevector <16 x i16> %l1, <16 x i16> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %s2 = shufflevector <16 x i16> %l1, <16 x i16> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
   %s3 = shufflevector <16 x i16> %l1, <16 x i16> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
@@ -289,11 +289,11 @@ entry:
   %a1 = add <4 x i16> %s1, %s2
   %a2 = add <4 x i16> %s3, %s4
   %a3 = add <4 x i16> %a1, %a2
-  store <4 x i16> %a3, <4 x i16> *%dst
+  store <4 x i16> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v8i16(<32 x i16> *%src, <8 x i16> *%dst) {
+define void @vld4_v8i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld40.16 {q0, q1, q2, q3}, [r0]
@@ -306,7 +306,7 @@ define void @vld4_v8i16(<32 x i16> *%src, <8 x i16> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x i16>, <32 x i16>* %src, align 2
+  %l1 = load <32 x i16>, ptr %src, align 2
   %s1 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
   %s2 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
   %s3 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
@@ -314,11 +314,11 @@ entry:
   %a1 = add <8 x i16> %s1, %s2
   %a2 = add <8 x i16> %s3, %s4
   %a3 = add <8 x i16> %a1, %a2
-  store <8 x i16> %a3, <8 x i16> *%dst
+  store <8 x i16> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v16i16(<64 x i16> *%src, <16 x i16> *%dst) {
+define void @vld4_v16i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -342,7 +342,7 @@ define void @vld4_v16i16(<64 x i16> *%src, <16 x i16> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <64 x i16>, <64 x i16>* %src, align 2
+  %l1 = load <64 x i16>, ptr %src, align 2
   %s1 = shufflevector <64 x i16> %l1, <64 x i16> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
   %s2 = shufflevector <64 x i16> %l1, <64 x i16> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
   %s3 = shufflevector <64 x i16> %l1, <64 x i16> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
@@ -350,11 +350,11 @@ entry:
   %a1 = add <16 x i16> %s1, %s2
   %a2 = add <16 x i16> %s3, %s4
   %a3 = add <16 x i16> %a1, %a2
-  store <16 x i16> %a3, <16 x i16> *%dst
+  store <16 x i16> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v8i16_align1(<32 x i16> *%src, <8 x i16> *%dst) {
+define void @vld4_v8i16_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v8i16_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -409,7 +409,7 @@ define void @vld4_v8i16_align1(<32 x i16> *%src, <8 x i16> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x i16>, <32 x i16>* %src, align 1
+  %l1 = load <32 x i16>, ptr %src, align 1
   %s1 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
   %s2 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
   %s3 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
@@ -417,13 +417,13 @@ entry:
   %a1 = add <8 x i16> %s1, %s2
   %a2 = add <8 x i16> %s3, %s4
   %a3 = add <8 x i16> %a1, %a2
-  store <8 x i16> %a3, <8 x i16> *%dst
+  store <8 x i16> %a3, ptr %dst
   ret void
 }
 
 ; i8
 
-define void @vld4_v2i8(<8 x i8> *%src, <2 x i8> *%dst) {
+define void @vld4_v2i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v2i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0]
@@ -445,7 +445,7 @@ define void @vld4_v2i8(<8 x i8> *%src, <2 x i8> *%dst) {
 ; CHECK-NEXT:    strb r0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x i8>, <8 x i8>* %src, align 1
+  %l1 = load <8 x i8>, ptr %src, align 1
   %s1 = shufflevector <8 x i8> %l1, <8 x i8> undef, <2 x i32> <i32 0, i32 4>
   %s2 = shufflevector <8 x i8> %l1, <8 x i8> undef, <2 x i32> <i32 1, i32 5>
   %s3 = shufflevector <8 x i8> %l1, <8 x i8> undef, <2 x i32> <i32 2, i32 6>
@@ -453,11 +453,11 @@ entry:
   %a1 = add <2 x i8> %s1, %s2
   %a2 = add <2 x i8> %s3, %s4
   %a3 = add <2 x i8> %a1, %a2
-  store <2 x i8> %a3, <2 x i8> *%dst
+  store <2 x i8> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v4i8(<16 x i8> *%src, <4 x i8> *%dst) {
+define void @vld4_v4i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v4i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u8 q0, [r0]
@@ -475,7 +475,7 @@ define void @vld4_v4i8(<16 x i8> *%src, <4 x i8> *%dst) {
 ; CHECK-NEXT:    vstrb.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x i8>, <16 x i8>* %src, align 1
+  %l1 = load <16 x i8>, ptr %src, align 1
   %s1 = shufflevector <16 x i8> %l1, <16 x i8> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %s2 = shufflevector <16 x i8> %l1, <16 x i8> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
   %s3 = shufflevector <16 x i8> %l1, <16 x i8> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
@@ -483,11 +483,11 @@ entry:
   %a1 = add <4 x i8> %s1, %s2
   %a2 = add <4 x i8> %s3, %s4
   %a3 = add <4 x i8> %a1, %a2
-  store <4 x i8> %a3, <4 x i8> *%dst
+  store <4 x i8> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v8i8(<32 x i8> *%src, <8 x i8> *%dst) {
+define void @vld4_v8i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -565,7 +565,7 @@ define void @vld4_v8i8(<32 x i8> *%src, <8 x i8> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x i8>, <32 x i8>* %src, align 1
+  %l1 = load <32 x i8>, ptr %src, align 1
   %s1 = shufflevector <32 x i8> %l1, <32 x i8> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
   %s2 = shufflevector <32 x i8> %l1, <32 x i8> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
   %s3 = shufflevector <32 x i8> %l1, <32 x i8> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
@@ -573,11 +573,11 @@ entry:
   %a1 = add <8 x i8> %s1, %s2
   %a2 = add <8 x i8> %s3, %s4
   %a3 = add <8 x i8> %a1, %a2
-  store <8 x i8> %a3, <8 x i8> *%dst
+  store <8 x i8> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v16i8(<64 x i8> *%src, <16 x i8> *%dst) {
+define void @vld4_v16i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld40.8 {q0, q1, q2, q3}, [r0]
@@ -590,7 +590,7 @@ define void @vld4_v16i8(<64 x i8> *%src, <16 x i8> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <64 x i8>, <64 x i8>* %src, align 1
+  %l1 = load <64 x i8>, ptr %src, align 1
   %s1 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
   %s2 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
   %s3 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
@@ -598,13 +598,13 @@ entry:
   %a1 = add <16 x i8> %s1, %s2
   %a2 = add <16 x i8> %s3, %s4
   %a3 = add <16 x i8> %a1, %a2
-  store <16 x i8> %a3, <16 x i8> *%dst
+  store <16 x i8> %a3, ptr %dst
   ret void
 }
 
 ; i64
 
-define void @vld4_v2i64(<8 x i64> *%src, <2 x i64> *%dst) {
+define void @vld4_v2i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
@@ -649,7 +649,7 @@ define void @vld4_v2i64(<8 x i64> *%src, <2 x i64> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 entry:
-  %l1 = load <8 x i64>, <8 x i64>* %src, align 8
+  %l1 = load <8 x i64>, ptr %src, align 8
   %s1 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 0, i32 4>
   %s2 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 1, i32 5>
   %s3 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 2, i32 6>
@@ -657,11 +657,11 @@ entry:
   %a1 = add <2 x i64> %s1, %s2
   %a2 = add <2 x i64> %s3, %s4
   %a3 = add <2 x i64> %a1, %a2
-  store <2 x i64> %a3, <2 x i64> *%dst
+  store <2 x i64> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v4i64(<16 x i64> *%src, <4 x i64> *%dst) {
+define void @vld4_v4i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v4i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
@@ -746,7 +746,7 @@ define void @vld4_v4i64(<16 x i64> *%src, <4 x i64> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
 entry:
-  %l1 = load <16 x i64>, <16 x i64>* %src, align 8
+  %l1 = load <16 x i64>, ptr %src, align 8
   %s1 = shufflevector <16 x i64> %l1, <16 x i64> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %s2 = shufflevector <16 x i64> %l1, <16 x i64> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
   %s3 = shufflevector <16 x i64> %l1, <16 x i64> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
@@ -754,13 +754,13 @@ entry:
   %a1 = add <4 x i64> %s1, %s2
   %a2 = add <4 x i64> %s3, %s4
   %a3 = add <4 x i64> %a1, %a2
-  store <4 x i64> %a3, <4 x i64> *%dst
+  store <4 x i64> %a3, ptr %dst
   ret void
 }
 
 ; f32
 
-define void @vld4_v2f32(<8 x float> *%src, <2 x float> *%dst) {
+define void @vld4_v2f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v2f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
@@ -778,7 +778,7 @@ define void @vld4_v2f32(<8 x float> *%src, <2 x float> *%dst) {
 ; CHECK-NEXT:    vstmia r1, {s0, s1}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x float>, <8 x float>* %src, align 4
+  %l1 = load <8 x float>, ptr %src, align 4
   %s1 = shufflevector <8 x float> %l1, <8 x float> undef, <2 x i32> <i32 0, i32 4>
   %s2 = shufflevector <8 x float> %l1, <8 x float> undef, <2 x i32> <i32 1, i32 5>
   %s3 = shufflevector <8 x float> %l1, <8 x float> undef, <2 x i32> <i32 2, i32 6>
@@ -786,11 +786,11 @@ entry:
   %a1 = fadd <2 x float> %s1, %s2
   %a2 = fadd <2 x float> %s3, %s4
   %a3 = fadd <2 x float> %a1, %a2
-  store <2 x float> %a3, <2 x float> *%dst
+  store <2 x float> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v4f32(<16 x float> *%src, <4 x float> *%dst) {
+define void @vld4_v4f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld40.32 {q0, q1, q2, q3}, [r0]
@@ -803,7 +803,7 @@ define void @vld4_v4f32(<16 x float> *%src, <4 x float> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x float>, <16 x float>* %src, align 4
+  %l1 = load <16 x float>, ptr %src, align 4
   %s1 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %s2 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
   %s3 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
@@ -811,11 +811,11 @@ entry:
   %a1 = fadd <4 x float> %s1, %s2
   %a2 = fadd <4 x float> %s3, %s4
   %a3 = fadd <4 x float> %a1, %a2
-  store <4 x float> %a3, <4 x float> *%dst
+  store <4 x float> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v8f32(<32 x float> *%src, <8 x float> *%dst) {
+define void @vld4_v8f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v8f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -839,7 +839,7 @@ define void @vld4_v8f32(<32 x float> *%src, <8 x float> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x float>, <32 x float>* %src, align 4
+  %l1 = load <32 x float>, ptr %src, align 4
   %s1 = shufflevector <32 x float> %l1, <32 x float> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
   %s2 = shufflevector <32 x float> %l1, <32 x float> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
   %s3 = shufflevector <32 x float> %l1, <32 x float> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
@@ -847,11 +847,11 @@ entry:
   %a1 = fadd <8 x float> %s1, %s2
   %a2 = fadd <8 x float> %s3, %s4
   %a3 = fadd <8 x float> %a1, %a2
-  store <8 x float> %a3, <8 x float> *%dst
+  store <8 x float> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v16f32(<64 x float> *%src, <16 x float> *%dst) {
+define void @vld4_v16f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v16f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -901,7 +901,7 @@ define void @vld4_v16f32(<64 x float> *%src, <16 x float> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <64 x float>, <64 x float>* %src, align 4
+  %l1 = load <64 x float>, ptr %src, align 4
   %s1 = shufflevector <64 x float> %l1, <64 x float> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
   %s2 = shufflevector <64 x float> %l1, <64 x float> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
   %s3 = shufflevector <64 x float> %l1, <64 x float> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
@@ -909,11 +909,11 @@ entry:
   %a1 = fadd <16 x float> %s1, %s2
   %a2 = fadd <16 x float> %s3, %s4
   %a3 = fadd <16 x float> %a1, %a2
-  store <16 x float> %a3, <16 x float> *%dst
+  store <16 x float> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v4f32_align1(<16 x float> *%src, <4 x float> *%dst) {
+define void @vld4_v4f32_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v4f32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -944,7 +944,7 @@ define void @vld4_v4f32_align1(<16 x float> *%src, <4 x float> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x float>, <16 x float>* %src, align 1
+  %l1 = load <16 x float>, ptr %src, align 1
   %s1 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %s2 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
   %s3 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
@@ -952,13 +952,13 @@ entry:
   %a1 = fadd <4 x float> %s1, %s2
   %a2 = fadd <4 x float> %s3, %s4
   %a3 = fadd <4 x float> %a1, %a2
-  store <4 x float> %a3, <4 x float> *%dst
+  store <4 x float> %a3, ptr %dst
   ret void
 }
 
 ; f16
 
-define void @vld4_v2f16(<8 x half> *%src, <2 x half> *%dst) {
+define void @vld4_v2f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v2f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r0]
@@ -978,7 +978,7 @@ define void @vld4_v2f16(<8 x half> *%src, <2 x half> *%dst) {
 ; CHECK-NEXT:    str r0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x half>, <8 x half>* %src, align 2
+  %l1 = load <8 x half>, ptr %src, align 2
   %s1 = shufflevector <8 x half> %l1, <8 x half> undef, <2 x i32> <i32 0, i32 4>
   %s2 = shufflevector <8 x half> %l1, <8 x half> undef, <2 x i32> <i32 1, i32 5>
   %s3 = shufflevector <8 x half> %l1, <8 x half> undef, <2 x i32> <i32 2, i32 6>
@@ -986,11 +986,11 @@ entry:
   %a1 = fadd <2 x half> %s1, %s2
   %a2 = fadd <2 x half> %s3, %s4
   %a3 = fadd <2 x half> %a1, %a2
-  store <2 x half> %a3, <2 x half> *%dst
+  store <2 x half> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v4f16(<16 x half> *%src, <4 x half> *%dst) {
+define void @vld4_v4f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v4f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8}
@@ -1024,7 +1024,7 @@ define void @vld4_v4f16(<16 x half> *%src, <4 x half> *%dst) {
 ; CHECK-NEXT:    vpop {d8}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x half>, <16 x half>* %src, align 2
+  %l1 = load <16 x half>, ptr %src, align 2
   %s1 = shufflevector <16 x half> %l1, <16 x half> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %s2 = shufflevector <16 x half> %l1, <16 x half> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
   %s3 = shufflevector <16 x half> %l1, <16 x half> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
@@ -1032,11 +1032,11 @@ entry:
   %a1 = fadd <4 x half> %s1, %s2
   %a2 = fadd <4 x half> %s3, %s4
   %a3 = fadd <4 x half> %a1, %a2
-  store <4 x half> %a3, <4 x half> *%dst
+  store <4 x half> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v8f16(<32 x half> *%src, <8 x half> *%dst) {
+define void @vld4_v8f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v8f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vld40.16 {q0, q1, q2, q3}, [r0]
@@ -1049,7 +1049,7 @@ define void @vld4_v8f16(<32 x half> *%src, <8 x half> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x half>, <32 x half>* %src, align 2
+  %l1 = load <32 x half>, ptr %src, align 2
   %s1 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
   %s2 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
   %s3 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
@@ -1057,11 +1057,11 @@ entry:
   %a1 = fadd <8 x half> %s1, %s2
   %a2 = fadd <8 x half> %s3, %s4
   %a3 = fadd <8 x half> %a1, %a2
-  store <8 x half> %a3, <8 x half> *%dst
+  store <8 x half> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v16f16(<64 x half> *%src, <16 x half> *%dst) {
+define void @vld4_v16f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v16f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -1085,7 +1085,7 @@ define void @vld4_v16f16(<64 x half> *%src, <16 x half> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <64 x half>, <64 x half>* %src, align 2
+  %l1 = load <64 x half>, ptr %src, align 2
   %s1 = shufflevector <64 x half> %l1, <64 x half> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
   %s2 = shufflevector <64 x half> %l1, <64 x half> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
   %s3 = shufflevector <64 x half> %l1, <64 x half> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
@@ -1093,11 +1093,11 @@ entry:
   %a1 = fadd <16 x half> %s1, %s2
   %a2 = fadd <16 x half> %s3, %s4
   %a3 = fadd <16 x half> %a1, %a2
-  store <16 x half> %a3, <16 x half> *%dst
+  store <16 x half> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v8f16_align1(<32 x half> *%src, <8 x half> *%dst) {
+define void @vld4_v8f16_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v8f16_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
@@ -1152,7 +1152,7 @@ define void @vld4_v8f16_align1(<32 x half> *%src, <8 x half> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <32 x half>, <32 x half>* %src, align 1
+  %l1 = load <32 x half>, ptr %src, align 1
   %s1 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
   %s2 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
   %s3 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
@@ -1160,13 +1160,13 @@ entry:
   %a1 = fadd <8 x half> %s1, %s2
   %a2 = fadd <8 x half> %s3, %s4
   %a3 = fadd <8 x half> %a1, %a2
-  store <8 x half> %a3, <8 x half> *%dst
+  store <8 x half> %a3, ptr %dst
   ret void
 }
 
 ; f64
 
-define void @vld4_v2f64(<8 x double> *%src, <2 x double> *%dst) {
+define void @vld4_v2f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v2f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #48]
@@ -1182,7 +1182,7 @@ define void @vld4_v2f64(<8 x double> *%src, <2 x double> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <8 x double>, <8 x double>* %src, align 8
+  %l1 = load <8 x double>, ptr %src, align 8
   %s1 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 0, i32 4>
   %s2 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 1, i32 5>
   %s3 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 2, i32 6>
@@ -1190,11 +1190,11 @@ entry:
   %a1 = fadd <2 x double> %s1, %s2
   %a2 = fadd <2 x double> %s3, %s4
   %a3 = fadd <2 x double> %a1, %a2
-  store <2 x double> %a3, <2 x double> *%dst
+  store <2 x double> %a3, ptr %dst
   ret void
 }
 
-define void @vld4_v4f64(<16 x double> *%src, <4 x double> *%dst) {
+define void @vld4_v4f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vld4_v4f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -1224,7 +1224,7 @@ define void @vld4_v4f64(<16 x double> *%src, <4 x double> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %l1 = load <16 x double>, <16 x double>* %src, align 8
+  %l1 = load <16 x double>, ptr %src, align 8
   %s1 = shufflevector <16 x double> %l1, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %s2 = shufflevector <16 x double> %l1, <16 x double> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
   %s3 = shufflevector <16 x double> %l1, <16 x double> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
@@ -1232,6 +1232,6 @@ entry:
   %a1 = fadd <4 x double> %s1, %s2
   %a2 = fadd <4 x double> %s3, %s4
   %a3 = fadd <4 x double> %a1, %a2
-  store <4 x double> %a3, <4 x double> *%dst
+  store <4 x double> %a3, ptr %dst
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-vldshuffle.ll b/llvm/test/CodeGen/Thumb2/mve-vldshuffle.ll
index 3dcdca3620158..2509d5af61808 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vldshuffle.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vldshuffle.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs %s -o - | FileCheck %s
 
-define void @arm_cmplx_mag_squared_f16(half* nocapture readonly %pSrc, half* nocapture %pDst, i32 %numSamples) {
+define void @arm_cmplx_mag_squared_f16(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 %numSamples) {
 ; CHECK-LABEL: arm_cmplx_mag_squared_f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -66,63 +66,61 @@ while.body.preheader:                             ; preds = %entry
   br i1 %min.iters.check, label %while.body.preheader26, label %vector.memcheck
 
 vector.memcheck:                                  ; preds = %while.body.preheader
-  %scevgep = getelementptr half, half* %pDst, i32 %numSamples
+  %scevgep = getelementptr half, ptr %pDst, i32 %numSamples
   %0 = shl i32 %numSamples, 1
-  %scevgep18 = getelementptr half, half* %pSrc, i32 %0
-  %bound0 = icmp ugt half* %scevgep18, %pDst
-  %bound1 = icmp ugt half* %scevgep, %pSrc
+  %scevgep18 = getelementptr half, ptr %pSrc, i32 %0
+  %bound0 = icmp ugt ptr %scevgep18, %pDst
+  %bound1 = icmp ugt ptr %scevgep, %pSrc
   %found.conflict = and i1 %bound0, %bound1
   br i1 %found.conflict, label %while.body.preheader26, label %vector.ph
 
 vector.ph:                                        ; preds = %vector.memcheck
   %n.vec = and i32 %numSamples, -8
   %1 = shl i32 %n.vec, 1
-  %ind.end = getelementptr half, half* %pSrc, i32 %1
-  %ind.end21 = getelementptr half, half* %pDst, i32 %n.vec
+  %ind.end = getelementptr half, ptr %pSrc, i32 %1
+  %ind.end21 = getelementptr half, ptr %pDst, i32 %n.vec
   %ind.end23 = and i32 %numSamples, 7
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %2 = shl i32 %index, 1
-  %next.gep = getelementptr half, half* %pSrc, i32 %2
-  %next.gep24 = getelementptr half, half* %pDst, i32 %index
-  %3 = bitcast half* %next.gep to <16 x half>*
-  %wide.vec = load <16 x half>, <16 x half>* %3, align 2
-  %4 = fmul fast <16 x half> %wide.vec, %wide.vec
-  %5 = shufflevector <16 x half> %4, <16 x half> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %6 = fmul fast <16 x half> %wide.vec, %wide.vec
-  %7 = shufflevector <16 x half> %6, <16 x half> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-  %8 = fadd fast <8 x half> %7, %5
-  %9 = bitcast half* %next.gep24 to <8 x half>*
-  store <8 x half> %8, <8 x half>* %9, align 2
+  %next.gep = getelementptr half, ptr %pSrc, i32 %2
+  %next.gep24 = getelementptr half, ptr %pDst, i32 %index
+  %wide.vec = load <16 x half>, ptr %next.gep, align 2
+  %3 = fmul fast <16 x half> %wide.vec, %wide.vec
+  %4 = shufflevector <16 x half> %3, <16 x half> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %5 = fmul fast <16 x half> %wide.vec, %wide.vec
+  %6 = shufflevector <16 x half> %5, <16 x half> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %7 = fadd fast <8 x half> %6, %4
+  store <8 x half> %7, ptr %next.gep24, align 2
   %index.next = add i32 %index, 8
-  %10 = icmp eq i32 %index.next, %n.vec
-  br i1 %10, label %middle.block, label %vector.body
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %numSamples
   br i1 %cmp.n, label %while.end, label %while.body.preheader26
 
 while.body.preheader26:                           ; preds = %middle.block, %vector.memcheck, %while.body.preheader
-  %pSrc.addr.014.ph = phi half* [ %pSrc, %vector.memcheck ], [ %pSrc, %while.body.preheader ], [ %ind.end, %middle.block ]
-  %pDst.addr.013.ph = phi half* [ %pDst, %vector.memcheck ], [ %pDst, %while.body.preheader ], [ %ind.end21, %middle.block ]
+  %pSrc.addr.014.ph = phi ptr [ %pSrc, %vector.memcheck ], [ %pSrc, %while.body.preheader ], [ %ind.end, %middle.block ]
+  %pDst.addr.013.ph = phi ptr [ %pDst, %vector.memcheck ], [ %pDst, %while.body.preheader ], [ %ind.end21, %middle.block ]
   %blkCnt.012.ph = phi i32 [ %numSamples, %vector.memcheck ], [ %numSamples, %while.body.preheader ], [ %ind.end23, %middle.block ]
   br label %while.body
 
 while.body:                                       ; preds = %while.body.preheader26, %while.body
-  %pSrc.addr.014 = phi half* [ %incdec.ptr1, %while.body ], [ %pSrc.addr.014.ph, %while.body.preheader26 ]
-  %pDst.addr.013 = phi half* [ %incdec.ptr3, %while.body ], [ %pDst.addr.013.ph, %while.body.preheader26 ]
+  %pSrc.addr.014 = phi ptr [ %incdec.ptr1, %while.body ], [ %pSrc.addr.014.ph, %while.body.preheader26 ]
+  %pDst.addr.013 = phi ptr [ %incdec.ptr3, %while.body ], [ %pDst.addr.013.ph, %while.body.preheader26 ]
   %blkCnt.012 = phi i32 [ %dec, %while.body ], [ %blkCnt.012.ph, %while.body.preheader26 ]
-  %incdec.ptr = getelementptr inbounds half, half* %pSrc.addr.014, i32 1
-  %11 = load half, half* %pSrc.addr.014, align 2
-  %incdec.ptr1 = getelementptr inbounds half, half* %pSrc.addr.014, i32 2
-  %12 = load half, half* %incdec.ptr, align 2
-  %mul = fmul fast half %11, %11
-  %mul2 = fmul fast half %12, %12
+  %incdec.ptr = getelementptr inbounds half, ptr %pSrc.addr.014, i32 1
+  %9 = load half, ptr %pSrc.addr.014, align 2
+  %incdec.ptr1 = getelementptr inbounds half, ptr %pSrc.addr.014, i32 2
+  %10 = load half, ptr %incdec.ptr, align 2
+  %mul = fmul fast half %9, %9
+  %mul2 = fmul fast half %10, %10
   %add = fadd fast half %mul2, %mul
-  %incdec.ptr3 = getelementptr inbounds half, half* %pDst.addr.013, i32 1
-  store half %add, half* %pDst.addr.013, align 2
+  %incdec.ptr3 = getelementptr inbounds half, ptr %pDst.addr.013, i32 1
+  store half %add, ptr %pDst.addr.013, align 2
   %dec = add i32 %blkCnt.012, -1
   %cmp.not = icmp eq i32 %dec, 0
   br i1 %cmp.not, label %while.end, label %while.body
@@ -131,7 +129,7 @@ while.end:                                        ; preds = %while.body, %middle
   ret void
 }
 
-define void @arm_cmplx_mag_squared_f32(float* nocapture readonly %pSrc, float* nocapture %pDst, i32 %numSamples) {
+define void @arm_cmplx_mag_squared_f32(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 %numSamples) {
 ; CHECK-LABEL: arm_cmplx_mag_squared_f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -194,63 +192,61 @@ while.body.preheader:                             ; preds = %entry
   br i1 %min.iters.check, label %while.body.preheader26, label %vector.memcheck
 
 vector.memcheck:                                  ; preds = %while.body.preheader
-  %scevgep = getelementptr float, float* %pDst, i32 %numSamples
+  %scevgep = getelementptr float, ptr %pDst, i32 %numSamples
   %0 = shl i32 %numSamples, 1
-  %scevgep18 = getelementptr float, float* %pSrc, i32 %0
-  %bound0 = icmp ugt float* %scevgep18, %pDst
-  %bound1 = icmp ugt float* %scevgep, %pSrc
+  %scevgep18 = getelementptr float, ptr %pSrc, i32 %0
+  %bound0 = icmp ugt ptr %scevgep18, %pDst
+  %bound1 = icmp ugt ptr %scevgep, %pSrc
   %found.conflict = and i1 %bound0, %bound1
   br i1 %found.conflict, label %while.body.preheader26, label %vector.ph
 
 vector.ph:                                        ; preds = %vector.memcheck
   %n.vec = and i32 %numSamples, -4
   %1 = shl i32 %n.vec, 1
-  %ind.end = getelementptr float, float* %pSrc, i32 %1
-  %ind.end21 = getelementptr float, float* %pDst, i32 %n.vec
+  %ind.end = getelementptr float, ptr %pSrc, i32 %1
+  %ind.end21 = getelementptr float, ptr %pDst, i32 %n.vec
   %ind.end23 = and i32 %numSamples, 3
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %2 = shl i32 %index, 1
-  %next.gep = getelementptr float, float* %pSrc, i32 %2
-  %next.gep24 = getelementptr float, float* %pDst, i32 %index
-  %3 = bitcast float* %next.gep to <8 x float>*
-  %wide.vec = load <8 x float>, <8 x float>* %3, align 4
-  %4 = fmul fast <8 x float> %wide.vec, %wide.vec
-  %5 = shufflevector <8 x float> %4, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %6 = fmul fast <8 x float> %wide.vec, %wide.vec
-  %7 = shufflevector <8 x float> %6, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-  %8 = fadd fast <4 x float> %7, %5
-  %9 = bitcast float* %next.gep24 to <4 x float>*
-  store <4 x float> %8, <4 x float>* %9, align 4
+  %next.gep = getelementptr float, ptr %pSrc, i32 %2
+  %next.gep24 = getelementptr float, ptr %pDst, i32 %index
+  %wide.vec = load <8 x float>, ptr %next.gep, align 4
+  %3 = fmul fast <8 x float> %wide.vec, %wide.vec
+  %4 = shufflevector <8 x float> %3, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %5 = fmul fast <8 x float> %wide.vec, %wide.vec
+  %6 = shufflevector <8 x float> %5, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %7 = fadd fast <4 x float> %6, %4
+  store <4 x float> %7, ptr %next.gep24, align 4
   %index.next = add i32 %index, 4
-  %10 = icmp eq i32 %index.next, %n.vec
-  br i1 %10, label %middle.block, label %vector.body
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %numSamples
   br i1 %cmp.n, label %while.end, label %while.body.preheader26
 
 while.body.preheader26:                           ; preds = %middle.block, %vector.memcheck, %while.body.preheader
-  %pSrc.addr.014.ph = phi float* [ %pSrc, %vector.memcheck ], [ %pSrc, %while.body.preheader ], [ %ind.end, %middle.block ]
-  %pDst.addr.013.ph = phi float* [ %pDst, %vector.memcheck ], [ %pDst, %while.body.preheader ], [ %ind.end21, %middle.block ]
+  %pSrc.addr.014.ph = phi ptr [ %pSrc, %vector.memcheck ], [ %pSrc, %while.body.preheader ], [ %ind.end, %middle.block ]
+  %pDst.addr.013.ph = phi ptr [ %pDst, %vector.memcheck ], [ %pDst, %while.body.preheader ], [ %ind.end21, %middle.block ]
   %blkCnt.012.ph = phi i32 [ %numSamples, %vector.memcheck ], [ %numSamples, %while.body.preheader ], [ %ind.end23, %middle.block ]
   br label %while.body
 
 while.body:                                       ; preds = %while.body.preheader26, %while.body
-  %pSrc.addr.014 = phi float* [ %incdec.ptr1, %while.body ], [ %pSrc.addr.014.ph, %while.body.preheader26 ]
-  %pDst.addr.013 = phi float* [ %incdec.ptr3, %while.body ], [ %pDst.addr.013.ph, %while.body.preheader26 ]
+  %pSrc.addr.014 = phi ptr [ %incdec.ptr1, %while.body ], [ %pSrc.addr.014.ph, %while.body.preheader26 ]
+  %pDst.addr.013 = phi ptr [ %incdec.ptr3, %while.body ], [ %pDst.addr.013.ph, %while.body.preheader26 ]
   %blkCnt.012 = phi i32 [ %dec, %while.body ], [ %blkCnt.012.ph, %while.body.preheader26 ]
-  %incdec.ptr = getelementptr inbounds float, float* %pSrc.addr.014, i32 1
-  %11 = load float, float* %pSrc.addr.014, align 4
-  %incdec.ptr1 = getelementptr inbounds float, float* %pSrc.addr.014, i32 2
-  %12 = load float, float* %incdec.ptr, align 4
-  %mul = fmul fast float %11, %11
-  %mul2 = fmul fast float %12, %12
+  %incdec.ptr = getelementptr inbounds float, ptr %pSrc.addr.014, i32 1
+  %9 = load float, ptr %pSrc.addr.014, align 4
+  %incdec.ptr1 = getelementptr inbounds float, ptr %pSrc.addr.014, i32 2
+  %10 = load float, ptr %incdec.ptr, align 4
+  %mul = fmul fast float %9, %9
+  %mul2 = fmul fast float %10, %10
   %add = fadd fast float %mul2, %mul
-  %incdec.ptr3 = getelementptr inbounds float, float* %pDst.addr.013, i32 1
-  store float %add, float* %pDst.addr.013, align 4
+  %incdec.ptr3 = getelementptr inbounds float, ptr %pDst.addr.013, i32 1
+  store float %add, ptr %pDst.addr.013, align 4
   %dec = add i32 %blkCnt.012, -1
   %cmp.not = icmp eq i32 %dec, 0
   br i1 %cmp.not, label %while.end, label %while.body
@@ -259,7 +255,7 @@ while.end:                                        ; preds = %while.body, %middle
   ret void
 }
 
-define void @arm_cmplx_mag_squared_f16_cse(half* nocapture readonly %pSrc, half* nocapture %pDst, i32 %numSamples) {
+define void @arm_cmplx_mag_squared_f16_cse(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 %numSamples) {
 ; CHECK-LABEL: arm_cmplx_mag_squared_f16_cse:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -324,62 +320,60 @@ while.body.preheader:                             ; preds = %entry
   br i1 %min.iters.check, label %while.body.preheader26, label %vector.memcheck
 
 vector.memcheck:                                  ; preds = %while.body.preheader
-  %scevgep = getelementptr half, half* %pDst, i32 %numSamples
+  %scevgep = getelementptr half, ptr %pDst, i32 %numSamples
   %0 = shl i32 %numSamples, 1
-  %scevgep18 = getelementptr half, half* %pSrc, i32 %0
-  %bound0 = icmp ugt half* %scevgep18, %pDst
-  %bound1 = icmp ugt half* %scevgep, %pSrc
+  %scevgep18 = getelementptr half, ptr %pSrc, i32 %0
+  %bound0 = icmp ugt ptr %scevgep18, %pDst
+  %bound1 = icmp ugt ptr %scevgep, %pSrc
   %found.conflict = and i1 %bound0, %bound1
   br i1 %found.conflict, label %while.body.preheader26, label %vector.ph
 
 vector.ph:                                        ; preds = %vector.memcheck
   %n.vec = and i32 %numSamples, -8
   %1 = shl i32 %n.vec, 1
-  %ind.end = getelementptr half, half* %pSrc, i32 %1
-  %ind.end21 = getelementptr half, half* %pDst, i32 %n.vec
+  %ind.end = getelementptr half, ptr %pSrc, i32 %1
+  %ind.end21 = getelementptr half, ptr %pDst, i32 %n.vec
   %ind.end23 = and i32 %numSamples, 7
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %2 = shl i32 %index, 1
-  %next.gep = getelementptr half, half* %pSrc, i32 %2
-  %next.gep24 = getelementptr half, half* %pDst, i32 %index
-  %3 = bitcast half* %next.gep to <16 x half>*
-  %wide.vec = load <16 x half>, <16 x half>* %3, align 2
-  %4 = fmul fast <16 x half> %wide.vec, %wide.vec
-  %5 = shufflevector <16 x half> %4, <16 x half> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %6 = shufflevector <16 x half> %4, <16 x half> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-  %7 = fadd fast <8 x half> %6, %5
-  %8 = bitcast half* %next.gep24 to <8 x half>*
-  store <8 x half> %7, <8 x half>* %8, align 2
+  %next.gep = getelementptr half, ptr %pSrc, i32 %2
+  %next.gep24 = getelementptr half, ptr %pDst, i32 %index
+  %wide.vec = load <16 x half>, ptr %next.gep, align 2
+  %3 = fmul fast <16 x half> %wide.vec, %wide.vec
+  %4 = shufflevector <16 x half> %3, <16 x half> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %5 = shufflevector <16 x half> %3, <16 x half> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %6 = fadd fast <8 x half> %5, %4
+  store <8 x half> %6, ptr %next.gep24, align 2
   %index.next = add i32 %index, 8
-  %9 = icmp eq i32 %index.next, %n.vec
-  br i1 %9, label %middle.block, label %vector.body
+  %7 = icmp eq i32 %index.next, %n.vec
+  br i1 %7, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %numSamples
   br i1 %cmp.n, label %while.end, label %while.body.preheader26
 
 while.body.preheader26:                           ; preds = %middle.block, %vector.memcheck, %while.body.preheader
-  %pSrc.addr.014.ph = phi half* [ %pSrc, %vector.memcheck ], [ %pSrc, %while.body.preheader ], [ %ind.end, %middle.block ]
-  %pDst.addr.013.ph = phi half* [ %pDst, %vector.memcheck ], [ %pDst, %while.body.preheader ], [ %ind.end21, %middle.block ]
+  %pSrc.addr.014.ph = phi ptr [ %pSrc, %vector.memcheck ], [ %pSrc, %while.body.preheader ], [ %ind.end, %middle.block ]
+  %pDst.addr.013.ph = phi ptr [ %pDst, %vector.memcheck ], [ %pDst, %while.body.preheader ], [ %ind.end21, %middle.block ]
   %blkCnt.012.ph = phi i32 [ %numSamples, %vector.memcheck ], [ %numSamples, %while.body.preheader ], [ %ind.end23, %middle.block ]
   br label %while.body
 
 while.body:                                       ; preds = %while.body, %while.body.preheader26
-  %pSrc.addr.014 = phi half* [ %incdec.ptr1, %while.body ], [ %pSrc.addr.014.ph, %while.body.preheader26 ]
-  %pDst.addr.013 = phi half* [ %incdec.ptr3, %while.body ], [ %pDst.addr.013.ph, %while.body.preheader26 ]
+  %pSrc.addr.014 = phi ptr [ %incdec.ptr1, %while.body ], [ %pSrc.addr.014.ph, %while.body.preheader26 ]
+  %pDst.addr.013 = phi ptr [ %incdec.ptr3, %while.body ], [ %pDst.addr.013.ph, %while.body.preheader26 ]
   %blkCnt.012 = phi i32 [ %dec, %while.body ], [ %blkCnt.012.ph, %while.body.preheader26 ]
-  %incdec.ptr = getelementptr inbounds half, half* %pSrc.addr.014, i32 1
-  %10 = load half, half* %pSrc.addr.014, align 2
-  %incdec.ptr1 = getelementptr inbounds half, half* %pSrc.addr.014, i32 2
-  %11 = load half, half* %incdec.ptr, align 2
-  %mul = fmul fast half %10, %10
-  %mul2 = fmul fast half %11, %11
+  %incdec.ptr = getelementptr inbounds half, ptr %pSrc.addr.014, i32 1
+  %8 = load half, ptr %pSrc.addr.014, align 2
+  %incdec.ptr1 = getelementptr inbounds half, ptr %pSrc.addr.014, i32 2
+  %9 = load half, ptr %incdec.ptr, align 2
+  %mul = fmul fast half %8, %8
+  %mul2 = fmul fast half %9, %9
   %add = fadd fast half %mul2, %mul
-  %incdec.ptr3 = getelementptr inbounds half, half* %pDst.addr.013, i32 1
-  store half %add, half* %pDst.addr.013, align 2
+  %incdec.ptr3 = getelementptr inbounds half, ptr %pDst.addr.013, i32 1
+  store half %add, ptr %pDst.addr.013, align 2
   %dec = add i32 %blkCnt.012, -1
   %cmp.not = icmp eq i32 %dec, 0
   br i1 %cmp.not, label %while.end, label %while.body
@@ -388,7 +382,7 @@ while.end:                                        ; preds = %while.body, %middle
   ret void
 }
 
-define void @arm_cmplx_mag_squared_f32_cse(float* nocapture readonly %pSrc, float* nocapture %pDst, i32 %numSamples) {
+define void @arm_cmplx_mag_squared_f32_cse(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 %numSamples) {
 ; CHECK-LABEL: arm_cmplx_mag_squared_f32_cse:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -451,62 +445,60 @@ while.body.preheader:                             ; preds = %entry
   br i1 %min.iters.check, label %while.body.preheader26, label %vector.memcheck
 
 vector.memcheck:                                  ; preds = %while.body.preheader
-  %scevgep = getelementptr float, float* %pDst, i32 %numSamples
+  %scevgep = getelementptr float, ptr %pDst, i32 %numSamples
   %0 = shl i32 %numSamples, 1
-  %scevgep18 = getelementptr float, float* %pSrc, i32 %0
-  %bound0 = icmp ugt float* %scevgep18, %pDst
-  %bound1 = icmp ugt float* %scevgep, %pSrc
+  %scevgep18 = getelementptr float, ptr %pSrc, i32 %0
+  %bound0 = icmp ugt ptr %scevgep18, %pDst
+  %bound1 = icmp ugt ptr %scevgep, %pSrc
   %found.conflict = and i1 %bound0, %bound1
   br i1 %found.conflict, label %while.body.preheader26, label %vector.ph
 
 vector.ph:                                        ; preds = %vector.memcheck
   %n.vec = and i32 %numSamples, -4
   %1 = shl i32 %n.vec, 1
-  %ind.end = getelementptr float, float* %pSrc, i32 %1
-  %ind.end21 = getelementptr float, float* %pDst, i32 %n.vec
+  %ind.end = getelementptr float, ptr %pSrc, i32 %1
+  %ind.end21 = getelementptr float, ptr %pDst, i32 %n.vec
   %ind.end23 = and i32 %numSamples, 3
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %2 = shl i32 %index, 1
-  %next.gep = getelementptr float, float* %pSrc, i32 %2
-  %next.gep24 = getelementptr float, float* %pDst, i32 %index
-  %3 = bitcast float* %next.gep to <8 x float>*
-  %wide.vec = load <8 x float>, <8 x float>* %3, align 4
-  %4 = fmul fast <8 x float> %wide.vec, %wide.vec
-  %5 = shufflevector <8 x float> %4, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %6 = shufflevector <8 x float> %4, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-  %7 = fadd fast <4 x float> %6, %5
-  %8 = bitcast float* %next.gep24 to <4 x float>*
-  store <4 x float> %7, <4 x float>* %8, align 4
+  %next.gep = getelementptr float, ptr %pSrc, i32 %2
+  %next.gep24 = getelementptr float, ptr %pDst, i32 %index
+  %wide.vec = load <8 x float>, ptr %next.gep, align 4
+  %3 = fmul fast <8 x float> %wide.vec, %wide.vec
+  %4 = shufflevector <8 x float> %3, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %5 = shufflevector <8 x float> %3, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %6 = fadd fast <4 x float> %5, %4
+  store <4 x float> %6, ptr %next.gep24, align 4
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n.vec
-  br i1 %9, label %middle.block, label %vector.body
+  %7 = icmp eq i32 %index.next, %n.vec
+  br i1 %7, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i32 %n.vec, %numSamples
   br i1 %cmp.n, label %while.end, label %while.body.preheader26
 
 while.body.preheader26:                           ; preds = %middle.block, %vector.memcheck, %while.body.preheader
-  %pSrc.addr.014.ph = phi float* [ %pSrc, %vector.memcheck ], [ %pSrc, %while.body.preheader ], [ %ind.end, %middle.block ]
-  %pDst.addr.013.ph = phi float* [ %pDst, %vector.memcheck ], [ %pDst, %while.body.preheader ], [ %ind.end21, %middle.block ]
+  %pSrc.addr.014.ph = phi ptr [ %pSrc, %vector.memcheck ], [ %pSrc, %while.body.preheader ], [ %ind.end, %middle.block ]
+  %pDst.addr.013.ph = phi ptr [ %pDst, %vector.memcheck ], [ %pDst, %while.body.preheader ], [ %ind.end21, %middle.block ]
   %blkCnt.012.ph = phi i32 [ %numSamples, %vector.memcheck ], [ %numSamples, %while.body.preheader ], [ %ind.end23, %middle.block ]
   br label %while.body
 
 while.body:                                       ; preds = %while.body, %while.body.preheader26
-  %pSrc.addr.014 = phi float* [ %incdec.ptr1, %while.body ], [ %pSrc.addr.014.ph, %while.body.preheader26 ]
-  %pDst.addr.013 = phi float* [ %incdec.ptr3, %while.body ], [ %pDst.addr.013.ph, %while.body.preheader26 ]
+  %pSrc.addr.014 = phi ptr [ %incdec.ptr1, %while.body ], [ %pSrc.addr.014.ph, %while.body.preheader26 ]
+  %pDst.addr.013 = phi ptr [ %incdec.ptr3, %while.body ], [ %pDst.addr.013.ph, %while.body.preheader26 ]
   %blkCnt.012 = phi i32 [ %dec, %while.body ], [ %blkCnt.012.ph, %while.body.preheader26 ]
-  %incdec.ptr = getelementptr inbounds float, float* %pSrc.addr.014, i32 1
-  %10 = load float, float* %pSrc.addr.014, align 4
-  %incdec.ptr1 = getelementptr inbounds float, float* %pSrc.addr.014, i32 2
-  %11 = load float, float* %incdec.ptr, align 4
-  %mul = fmul fast float %10, %10
-  %mul2 = fmul fast float %11, %11
+  %incdec.ptr = getelementptr inbounds float, ptr %pSrc.addr.014, i32 1
+  %8 = load float, ptr %pSrc.addr.014, align 4
+  %incdec.ptr1 = getelementptr inbounds float, ptr %pSrc.addr.014, i32 2
+  %9 = load float, ptr %incdec.ptr, align 4
+  %mul = fmul fast float %8, %8
+  %mul2 = fmul fast float %9, %9
   %add = fadd fast float %mul2, %mul
-  %incdec.ptr3 = getelementptr inbounds float, float* %pDst.addr.013, i32 1
-  store float %add, float* %pDst.addr.013, align 4
+  %incdec.ptr3 = getelementptr inbounds float, ptr %pDst.addr.013, i32 1
+  store float %add, ptr %pDst.addr.013, align 4
   %dec = add i32 %blkCnt.012, -1
   %cmp.not = icmp eq i32 %dec, 0
   br i1 %cmp.not, label %while.end, label %while.body

diff --git a/llvm/test/CodeGen/Thumb2/mve-vldst4.ll b/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
index e69d06d475300..2e75ce90eb48c 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
 
-define void @vldst4(half* nocapture readonly %pIn, half* nocapture %pOut, i32 %numRows, i32 %numCols, i32 %scale.coerce) #0 {
+define void @vldst4(ptr nocapture readonly %pIn, ptr nocapture %pOut, i32 %numRows, i32 %numCols, i32 %scale.coerce) #0 {
 ; CHECK-LABEL: vldst4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -131,9 +131,9 @@ entry:
 vector.ph:                                        ; preds = %vector.memcheck
   %n.vec = and i32 %shr, 1073741816
   %l2 = shl nuw i32 %n.vec, 2
-  %ind.end = getelementptr half, half* %pIn, i32 %l2
+  %ind.end = getelementptr half, ptr %pIn, i32 %l2
   %l3 = shl nuw i32 %n.vec, 2
-  %ind.end48 = getelementptr half, half* %pOut, i32 %l3
+  %ind.end48 = getelementptr half, ptr %pOut, i32 %l3
   %ind.end50 = sub nsw i32 %shr, %n.vec
   %broadcast.splatinsert55 = insertelement <8 x half> undef, half %l0, i32 0
   %broadcast.splat56 = shufflevector <8 x half> %broadcast.splatinsert55, <8 x half> undef, <8 x i32> zeroinitializer
@@ -142,10 +142,9 @@ vector.ph:                                        ; preds = %vector.memcheck
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %l4 = shl i32 %index, 2
-  %next.gep = getelementptr half, half* %pIn, i32 %l4
+  %next.gep = getelementptr half, ptr %pIn, i32 %l4
   %l5 = shl i32 %index, 2
-  %l6 = bitcast half* %next.gep to <32 x half>*
-  %wide.vec = load <32 x half>, <32 x half>* %l6, align 2
+  %wide.vec = load <32 x half>, ptr %next.gep, align 2
   %strided.vec = shufflevector <32 x half> %wide.vec, <32 x half> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
   %strided.vec52 = shufflevector <32 x half> %wide.vec, <32 x half> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
   %strided.vec53 = shufflevector <32 x half> %wide.vec, <32 x half> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
@@ -154,12 +153,11 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %l8 = fmul <8 x half> %strided.vec52, %broadcast.splat56
   %l9 = fmul <8 x half> %strided.vec53, %broadcast.splat56
   %l10 = fmul <8 x half> %strided.vec54, %broadcast.splat56
-  %l11 = getelementptr inbounds half, half* %pOut, i32 %l5
-  %l12 = bitcast half* %l11 to <32 x half>*
+  %l11 = getelementptr inbounds half, ptr %pOut, i32 %l5
   %l13 = shufflevector <8 x half> %l7, <8 x half> %l8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %l14 = shufflevector <8 x half> %l9, <8 x half> %l10, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %interleaved.vec = shufflevector <16 x half> %l13, <16 x half> %l14, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
-  store <32 x half> %interleaved.vec, <32 x half>* %l12, align 2
+  store <32 x half> %interleaved.vec, ptr %l11, align 2
   %index.next = add i32 %index, 8
   %l15 = icmp eq i32 %index.next, %n.vec
   br i1 %l15, label %while.end, label %vector.body

diff --git a/llvm/test/CodeGen/Thumb2/mve-vmaxnma-tailpred.ll b/llvm/test/CodeGen/Thumb2/mve-vmaxnma-tailpred.ll
index 8882108a0564d..79b7202bb68dd 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmaxnma-tailpred.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmaxnma-tailpred.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
 
-define float @minf32(float* noalias nocapture readonly %s1, float* noalias nocapture readonly %s2, float* noalias nocapture %d, i32 %n) {
+define float @minf32(ptr noalias nocapture readonly %s1, ptr noalias nocapture readonly %s2, ptr noalias nocapture %d, i32 %n) {
 ; CHECK-LABEL: minf32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -33,26 +33,23 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %0 = getelementptr inbounds float, float* %s1, i32 %index
-  %1 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison)
-  %2 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load)
-  %3 = getelementptr inbounds float, float* %s2, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load10 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison)
-  %5 = call fast <4 x float> @llvm.minnum.v4f32(<4 x float> %2, <4 x float> %wide.masked.load10)
-  %6 = getelementptr inbounds float, float* %d, i32 %index
-  %7 = bitcast float* %6 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %5, <4 x float>* %7, i32 4, <4 x i1> %active.lane.mask)
+  %0 = getelementptr inbounds float, ptr %s1, i32 %index
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison)
+  %1 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load)
+  %2 = getelementptr inbounds float, ptr %s2, i32 %index
+  %wide.masked.load10 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison)
+  %3 = call fast <4 x float> @llvm.minnum.v4f32(<4 x float> %1, <4 x float> %wide.masked.load10)
+  %4 = getelementptr inbounds float, ptr %d, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %4, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %for.cond.cleanup, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret float undef
 }
 
-define float @maxaf32(float* noalias nocapture readonly %s1, float* noalias nocapture readonly %s2, float* noalias nocapture %d, i32 %n) {
+define float @maxaf32(ptr noalias nocapture readonly %s1, ptr noalias nocapture readonly %s2, ptr noalias nocapture %d, i32 %n) {
 ; CHECK-LABEL: maxaf32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -83,28 +80,25 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %0 = getelementptr inbounds float, float* %s1, i32 %index
-  %1 = bitcast float* %0 to <4 x float>*
-  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison)
-  %2 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load)
-  %3 = getelementptr inbounds float, float* %s2, i32 %index
-  %4 = bitcast float* %3 to <4 x float>*
-  %wide.masked.load10 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison)
-  %5 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load10)
-  %6 = call fast <4 x float> @llvm.maxnum.v4f32(<4 x float> %2, <4 x float> %5)
-  %7 = getelementptr inbounds float, float* %d, i32 %index
-  %8 = bitcast float* %7 to <4 x float>*
-  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %6, <4 x float>* %8, i32 4, <4 x i1> %active.lane.mask)
+  %0 = getelementptr inbounds float, ptr %s1, i32 %index
+  %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison)
+  %1 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load)
+  %2 = getelementptr inbounds float, ptr %s2, i32 %index
+  %wide.masked.load10 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %active.lane.mask, <4 x float> poison)
+  %3 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.masked.load10)
+  %4 = call fast <4 x float> @llvm.maxnum.v4f32(<4 x float> %1, <4 x float> %3)
+  %5 = getelementptr inbounds float, ptr %d, i32 %index
+  call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %5, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %9 = icmp eq i32 %index.next, %n.vec
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret float undef
 }
 
 
-define half @maxf16(half* noalias nocapture readonly %s1, half* noalias nocapture readonly %s2, half* noalias nocapture %d, i32 %n) {
+define half @maxf16(ptr noalias nocapture readonly %s1, ptr noalias nocapture readonly %s2, ptr noalias nocapture %d, i32 %n) {
 ; CHECK-LABEL: maxf16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -136,26 +130,23 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %n)
-  %0 = getelementptr inbounds half, half* %s1, i32 %index
-  %1 = bitcast half* %0 to <8 x half>*
-  %wide.masked.load = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison)
-  %2 = call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %wide.masked.load)
-  %3 = getelementptr inbounds half, half* %s2, i32 %index
-  %4 = bitcast half* %3 to <8 x half>*
-  %wide.masked.load12 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %4, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison)
-  %5 = call fast <8 x half> @llvm.maxnum.v8f16(<8 x half> %2, <8 x half> %wide.masked.load12)
-  %6 = getelementptr inbounds half, half* %d, i32 %index
-  %7 = bitcast half* %6 to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %5, <8 x half>* %7, i32 2, <8 x i1> %active.lane.mask)
+  %0 = getelementptr inbounds half, ptr %s1, i32 %index
+  %wide.masked.load = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %0, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison)
+  %1 = call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %wide.masked.load)
+  %2 = getelementptr inbounds half, ptr %s2, i32 %index
+  %wide.masked.load12 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %2, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison)
+  %3 = call fast <8 x half> @llvm.maxnum.v8f16(<8 x half> %1, <8 x half> %wide.masked.load12)
+  %4 = getelementptr inbounds half, ptr %d, i32 %index
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %3, ptr %4, i32 2, <8 x i1> %active.lane.mask)
   %index.next = add i32 %index, 8
-  %8 = icmp eq i32 %index.next, %n.vec
-  br i1 %8, label %for.cond.cleanup, label %vector.body
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret half undef
 }
 
-define half @minaf16(half* noalias nocapture readonly %s1, half* noalias nocapture readonly %s2, half* noalias nocapture %d, i32 %n) {
+define half @minaf16(ptr noalias nocapture readonly %s1, ptr noalias nocapture readonly %s2, ptr noalias nocapture %d, i32 %n) {
 ; CHECK-LABEL: minaf16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -186,35 +177,32 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %n)
-  %0 = getelementptr inbounds half, half* %s1, i32 %index
-  %1 = bitcast half* %0 to <8 x half>*
-  %wide.masked.load = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison)
-  %2 = call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %wide.masked.load)
-  %3 = getelementptr inbounds half, half* %s2, i32 %index
-  %4 = bitcast half* %3 to <8 x half>*
-  %wide.masked.load12 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %4, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison)
-  %5 = call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %wide.masked.load12)
-  %6 = call fast <8 x half> @llvm.minnum.v8f16(<8 x half> %2, <8 x half> %5)
-  %7 = getelementptr inbounds half, half* %d, i32 %index
-  %8 = bitcast half* %7 to <8 x half>*
-  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %6, <8 x half>* %8, i32 2, <8 x i1> %active.lane.mask)
+  %0 = getelementptr inbounds half, ptr %s1, i32 %index
+  %wide.masked.load = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %0, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison)
+  %1 = call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %wide.masked.load)
+  %2 = getelementptr inbounds half, ptr %s2, i32 %index
+  %wide.masked.load12 = call <8 x half> @llvm.masked.load.v8f16.p0(ptr %2, i32 2, <8 x i1> %active.lane.mask, <8 x half> poison)
+  %3 = call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %wide.masked.load12)
+  %4 = call fast <8 x half> @llvm.minnum.v8f16(<8 x half> %1, <8 x half> %3)
+  %5 = getelementptr inbounds half, ptr %d, i32 %index
+  call void @llvm.masked.store.v8f16.p0(<8 x half> %4, ptr %5, i32 2, <8 x i1> %active.lane.mask)
   %index.next = add i32 %index, 8
-  %9 = icmp eq i32 %index.next, %n.vec
-  br i1 %9, label %for.cond.cleanup, label %vector.body
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret half undef
 }
 
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
-declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
 declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
 declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>)
 declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>)
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)
 declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
-declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32 immarg, <8 x i1>, <8 x half>)
+declare <8 x half> @llvm.masked.load.v8f16.p0(ptr, i32 immarg, <8 x i1>, <8 x half>)
 declare <8 x half> @llvm.fabs.v8f16(<8 x half>)
 declare <8 x half> @llvm.minnum.v8f16(<8 x half>, <8 x half>)
 declare <8 x half> @llvm.maxnum.v8f16(<8 x half>, <8 x half>)
-declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32 immarg, <8 x i1>)
+declare void @llvm.masked.store.v8f16.p0(<8 x half>, ptr, i32 immarg, <8 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-vmla.ll b/llvm/test/CodeGen/Thumb2/mve-vmla.ll
index 9de28ad393fa7..242adb1341e67 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmla.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmla.ll
@@ -79,7 +79,7 @@ entry:
   ret <16 x i8> %3
 }
 
-define void @vmla32_in_loop(i32* %s1, i32 %x, i32* %d, i32 %n) {
+define void @vmla32_in_loop(ptr %s1, i32 %x, ptr %d, i32 %n) {
 ; CHECK-LABEL: vmla32_in_loop:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:  .LBB6_1: @ %vector.body
@@ -99,25 +99,22 @@ entry:
 
 vector.body:
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %s1, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = mul nsw <4 x i32> %wide.load, %broadcast.splat9
-  %3 = getelementptr inbounds i32, i32* %d, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.load10 = load <4 x i32>, <4 x i32>* %4, align 4
-  %5 = add nsw <4 x i32> %wide.load10, %2
-  %6 = bitcast i32* %3 to <4 x i32>*
-  store <4 x i32> %5, <4 x i32>* %6, align 4
+  %0 = getelementptr inbounds i32, ptr %s1, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = mul nsw <4 x i32> %wide.load, %broadcast.splat9
+  %2 = getelementptr inbounds i32, ptr %d, i32 %index
+  %wide.load10 = load <4 x i32>, ptr %2, align 4
+  %3 = add nsw <4 x i32> %wide.load10, %1
+  store <4 x i32> %3, ptr %2, align 4
   %index.next = add i32 %index, 4
-  %7 = icmp eq i32 %index.next, %n
-  br i1 %7, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %n
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:
   ret void
 }
 
-define void @vmla16_in_loop(i16* %s1, i16 %x, i16* %d, i32 %n) {
+define void @vmla16_in_loop(ptr %s1, i16 %x, ptr %d, i32 %n) {
 ; CHECK-LABEL: vmla16_in_loop:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:  .LBB7_1: @ %vector.body
@@ -137,25 +134,22 @@ entry:
 
 vector.body:
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %s1, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = mul <8 x i16> %wide.load, %broadcast.splat12
-  %3 = getelementptr inbounds i16, i16* %d, i32 %index
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.load13 = load <8 x i16>, <8 x i16>* %4, align 2
-  %5 = add <8 x i16> %2, %wide.load13
-  %6 = bitcast i16* %3 to <8 x i16>*
-  store <8 x i16> %5, <8 x i16>* %6, align 2
+  %0 = getelementptr inbounds i16, ptr %s1, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = mul <8 x i16> %wide.load, %broadcast.splat12
+  %2 = getelementptr inbounds i16, ptr %d, i32 %index
+  %wide.load13 = load <8 x i16>, ptr %2, align 2
+  %3 = add <8 x i16> %1, %wide.load13
+  store <8 x i16> %3, ptr %2, align 2
   %index.next = add i32 %index, 8
-  %7 = icmp eq i32 %index.next, %n
-  br i1 %7, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %n
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:
   ret void
 }
 
-define void @vmla8_in_loop(i8* %s1, i8 %x, i8* %d, i32 %n) {
+define void @vmla8_in_loop(ptr %s1, i8 %x, ptr %d, i32 %n) {
 ; CHECK-LABEL: vmla8_in_loop:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:  .LBB8_1: @ %vector.body
@@ -175,19 +169,16 @@ entry:
 
 vector.body:
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %s1, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 2
-  %2 = mul <16 x i8> %wide.load, %broadcast.splat12
-  %3 = getelementptr inbounds i8, i8* %d, i32 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load13 = load <16 x i8>, <16 x i8>* %4, align 2
-  %5 = add <16 x i8> %2, %wide.load13
-  %6 = bitcast i8* %3 to <16 x i8>*
-  store <16 x i8> %5, <16 x i8>* %6, align 2
+  %0 = getelementptr inbounds i8, ptr %s1, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 2
+  %1 = mul <16 x i8> %wide.load, %broadcast.splat12
+  %2 = getelementptr inbounds i8, ptr %d, i32 %index
+  %wide.load13 = load <16 x i8>, ptr %2, align 2
+  %3 = add <16 x i8> %1, %wide.load13
+  store <16 x i8> %3, ptr %2, align 2
   %index.next = add i32 %index, 16
-  %7 = icmp eq i32 %index.next, %n
-  br i1 %7, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %n
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:
   ret void
@@ -272,7 +263,7 @@ entry:
   ret <16 x i8> %3
 }
 
-define void @vmlas32_in_loop(i32* %s1, i32 %x, i32* %d, i32 %n) {
+define void @vmlas32_in_loop(ptr %s1, i32 %x, ptr %d, i32 %n) {
 ; CHECK-LABEL: vmlas32_in_loop:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:  .LBB15_1: @ %vector.body
@@ -292,25 +283,22 @@ entry:
 
 vector.body:
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %s1, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = getelementptr inbounds i32, i32* %d, i32 %index
-  %3 = bitcast i32* %2 to <4 x i32>*
-  %wide.load10 = load <4 x i32>, <4 x i32>* %3, align 4
-  %4 = mul nsw <4 x i32> %wide.load, %wide.load10
-  %5 = add nsw <4 x i32> %broadcast.splat9, %4
-  %6 = bitcast i32* %2 to <4 x i32>*
-  store <4 x i32> %5, <4 x i32>* %6, align 4
+  %0 = getelementptr inbounds i32, ptr %s1, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = getelementptr inbounds i32, ptr %d, i32 %index
+  %wide.load10 = load <4 x i32>, ptr %1, align 4
+  %2 = mul nsw <4 x i32> %wide.load, %wide.load10
+  %3 = add nsw <4 x i32> %broadcast.splat9, %2
+  store <4 x i32> %3, ptr %1, align 4
   %index.next = add i32 %index, 4
-  %7 = icmp eq i32 %index.next, %n
-  br i1 %7, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %n
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:
   ret void
 }
 
-define void @vmlas16_in_loop(i16* %s1, i16 %x, i16* %d, i32 %n) {
+define void @vmlas16_in_loop(ptr %s1, i16 %x, ptr %d, i32 %n) {
 ; CHECK-LABEL: vmlas16_in_loop:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:  .LBB16_1: @ %vector.body
@@ -330,25 +318,22 @@ entry:
 
 vector.body:
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %s1, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = getelementptr inbounds i16, i16* %d, i32 %index
-  %3 = bitcast i16* %2 to <8 x i16>*
-  %wide.load13 = load <8 x i16>, <8 x i16>* %3, align 2
-  %4 = mul <8 x i16> %wide.load, %wide.load13
-  %5 = add <8 x i16> %4, %broadcast.splat12
-  %6 = bitcast i16* %2 to <8 x i16>*
-  store <8 x i16> %5, <8 x i16>* %6, align 2
+  %0 = getelementptr inbounds i16, ptr %s1, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = getelementptr inbounds i16, ptr %d, i32 %index
+  %wide.load13 = load <8 x i16>, ptr %1, align 2
+  %2 = mul <8 x i16> %wide.load, %wide.load13
+  %3 = add <8 x i16> %2, %broadcast.splat12
+  store <8 x i16> %3, ptr %1, align 2
   %index.next = add i32 %index, 8
-  %7 = icmp eq i32 %index.next, %n
-  br i1 %7, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %n
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:
   ret void
 }
 
-define void @vmlas8_in_loop(i8* %s1, i8 %x, i8* %d, i32 %n) {
+define void @vmlas8_in_loop(ptr %s1, i8 %x, ptr %d, i32 %n) {
 ; CHECK-LABEL: vmlas8_in_loop:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:  .LBB17_1: @ %vector.body
@@ -368,19 +353,16 @@ entry:
 
 vector.body:
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %s1, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 2
-  %2 = getelementptr inbounds i8, i8* %d, i32 %index
-  %3 = bitcast i8* %2 to <16 x i8>*
-  %wide.load13 = load <16 x i8>, <16 x i8>* %3, align 2
-  %4 = mul <16 x i8> %wide.load, %wide.load13
-  %5 = add <16 x i8> %4, %broadcast.splat12
-  %6 = bitcast i8* %2 to <16 x i8>*
-  store <16 x i8> %5, <16 x i8>* %6, align 2
+  %0 = getelementptr inbounds i8, ptr %s1, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 2
+  %1 = getelementptr inbounds i8, ptr %d, i32 %index
+  %wide.load13 = load <16 x i8>, ptr %1, align 2
+  %2 = mul <16 x i8> %wide.load, %wide.load13
+  %3 = add <16 x i8> %2, %broadcast.splat12
+  store <16 x i8> %3, ptr %1, align 2
   %index.next = add i32 %index, 16
-  %7 = icmp eq i32 %index.next, %n
-  br i1 %7, label %for.cond.cleanup, label %vector.body
+  %4 = icmp eq i32 %index.next, %n
+  br i1 %4, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:
   ret void

diff --git a/llvm/test/CodeGen/Thumb2/mve-vmovnstore.ll b/llvm/test/CodeGen/Thumb2/mve-vmovnstore.ll
index 61171a86d827c..5850ad11e96b6 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmovnstore.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmovnstore.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-MVE
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-MVEFP
 
-define arm_aapcs_vfpcc void @vmovn32_trunc1(<4 x i32> %src1, <4 x i32> %src2, <8 x i16> *%dest) {
+define arm_aapcs_vfpcc void @vmovn32_trunc1(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn32_trunc1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnt.i32 q0, q1
@@ -11,11 +11,11 @@ define arm_aapcs_vfpcc void @vmovn32_trunc1(<4 x i32> %src1, <4 x i32> %src2, <8
 entry:
   %strided.vec = shufflevector <4 x i32> %src1, <4 x i32> %src2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
   %out = trunc <8 x i32> %strided.vec to <8 x i16>
-  store <8 x i16> %out, <8 x i16> *%dest, align 8
+  store <8 x i16> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn32_trunc2(<4 x i32> %src1, <4 x i32> %src2, <8 x i16> *%dest) {
+define arm_aapcs_vfpcc void @vmovn32_trunc2(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn32_trunc2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnt.i32 q1, q0
@@ -24,11 +24,11 @@ define arm_aapcs_vfpcc void @vmovn32_trunc2(<4 x i32> %src1, <4 x i32> %src2, <8
 entry:
   %strided.vec = shufflevector <4 x i32> %src1, <4 x i32> %src2, <8 x i32> <i32 4, i32 0, i32 5, i32 1, i32 6, i32 2, i32 7, i32 3>
   %out = trunc <8 x i32> %strided.vec to <8 x i16>
-  store <8 x i16> %out, <8 x i16> *%dest, align 8
+  store <8 x i16> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn32_trunc1_onesrc(<8 x i32> %src1, <8 x i16> *%dest) {
+define arm_aapcs_vfpcc void @vmovn32_trunc1_onesrc(<8 x i32> %src1, ptr %dest) {
 ; CHECK-LABEL: vmovn32_trunc1_onesrc:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnt.i32 q0, q1
@@ -37,11 +37,11 @@ define arm_aapcs_vfpcc void @vmovn32_trunc1_onesrc(<8 x i32> %src1, <8 x i16> *%
 entry:
   %strided.vec = shufflevector <8 x i32> %src1, <8 x i32> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
   %out = trunc <8 x i32> %strided.vec to <8 x i16>
-  store <8 x i16> %out, <8 x i16> *%dest, align 8
+  store <8 x i16> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn32_trunc2_onesrc(<8 x i32> %src1, <8 x i16> *%dest) {
+define arm_aapcs_vfpcc void @vmovn32_trunc2_onesrc(<8 x i32> %src1, ptr %dest) {
 ; CHECK-LABEL: vmovn32_trunc2_onesrc:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnt.i32 q1, q0
@@ -50,11 +50,11 @@ define arm_aapcs_vfpcc void @vmovn32_trunc2_onesrc(<8 x i32> %src1, <8 x i16> *%
 entry:
   %strided.vec = shufflevector <8 x i32> %src1, <8 x i32> undef, <8 x i32> <i32 4, i32 0, i32 5, i32 1, i32 6, i32 2, i32 7, i32 3>
   %out = trunc <8 x i32> %strided.vec to <8 x i16>
-  store <8 x i16> %out, <8 x i16> *%dest, align 8
+  store <8 x i16> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn16_trunc1(<8 x i16> %src1, <8 x i16> %src2, <16 x i8> *%dest) {
+define arm_aapcs_vfpcc void @vmovn16_trunc1(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn16_trunc1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnt.i16 q0, q1
@@ -63,11 +63,11 @@ define arm_aapcs_vfpcc void @vmovn16_trunc1(<8 x i16> %src1, <8 x i16> %src2, <1
 entry:
   %strided.vec = shufflevector <8 x i16> %src1, <8 x i16> %src2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
   %out = trunc <16 x i16> %strided.vec to <16 x i8>
-  store <16 x i8> %out, <16 x i8> *%dest, align 8
+  store <16 x i8> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn16_trunc2(<8 x i16> %src1, <8 x i16> %src2, <16 x i8> *%dest) {
+define arm_aapcs_vfpcc void @vmovn16_trunc2(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn16_trunc2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnt.i16 q1, q0
@@ -76,11 +76,11 @@ define arm_aapcs_vfpcc void @vmovn16_trunc2(<8 x i16> %src1, <8 x i16> %src2, <1
 entry:
   %strided.vec = shufflevector <8 x i16> %src1, <8 x i16> %src2, <16 x i32> <i32 8, i32 0, i32 9, i32 1, i32 10, i32 2, i32 11, i32 3, i32 12, i32 4, i32 13, i32 5, i32 14, i32 6, i32 15, i32 7>
   %out = trunc <16 x i16> %strided.vec to <16 x i8>
-  store <16 x i8> %out, <16 x i8> *%dest, align 8
+  store <16 x i8> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn16_trunc1_onesrc(<16 x i16> %src1, <16 x i8> *%dest) {
+define arm_aapcs_vfpcc void @vmovn16_trunc1_onesrc(<16 x i16> %src1, ptr %dest) {
 ; CHECK-LABEL: vmovn16_trunc1_onesrc:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnt.i16 q0, q1
@@ -89,11 +89,11 @@ define arm_aapcs_vfpcc void @vmovn16_trunc1_onesrc(<16 x i16> %src1, <16 x i8> *
 entry:
   %strided.vec = shufflevector <16 x i16> %src1, <16 x i16> undef, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
   %out = trunc <16 x i16> %strided.vec to <16 x i8>
-  store <16 x i8> %out, <16 x i8> *%dest, align 8
+  store <16 x i8> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn16_trunc2_onesrc(<16 x i16> %src1, <16 x i8> *%dest) {
+define arm_aapcs_vfpcc void @vmovn16_trunc2_onesrc(<16 x i16> %src1, ptr %dest) {
 ; CHECK-LABEL: vmovn16_trunc2_onesrc:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnt.i16 q1, q0
@@ -102,12 +102,12 @@ define arm_aapcs_vfpcc void @vmovn16_trunc2_onesrc(<16 x i16> %src1, <16 x i8> *
 entry:
   %strided.vec = shufflevector <16 x i16> %src1, <16 x i16> undef, <16 x i32> <i32 8, i32 0, i32 9, i32 1, i32 10, i32 2, i32 11, i32 3, i32 12, i32 4, i32 13, i32 5, i32 14, i32 6, i32 15, i32 7>
   %out = trunc <16 x i16> %strided.vec to <16 x i8>
-  store <16 x i8> %out, <16 x i8> *%dest, align 8
+  store <16 x i8> %out, ptr %dest, align 8
   ret void
 }
 
 
-define arm_aapcs_vfpcc void @vmovn64_t1(<2 x i64> %src1, <2 x i64> %src2, <2 x i64> *%dest) {
+define arm_aapcs_vfpcc void @vmovn64_t1(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn64_t1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.f32 s2, s4
@@ -116,11 +116,11 @@ define arm_aapcs_vfpcc void @vmovn64_t1(<2 x i64> %src1, <2 x i64> %src2, <2 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <2 x i64> %src1, <2 x i64> %src2, <2 x i32> <i32 0, i32 2>
-  store <2 x i64> %out, <2 x i64> *%dest, align 8
+  store <2 x i64> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn64_t2(<2 x i64> %src1, <2 x i64> %src2, <2 x i64> *%dest) {
+define arm_aapcs_vfpcc void @vmovn64_t2(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn64_t2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.f32 s6, s0
@@ -129,11 +129,11 @@ define arm_aapcs_vfpcc void @vmovn64_t2(<2 x i64> %src1, <2 x i64> %src2, <2 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <2 x i64> %src1, <2 x i64> %src2, <2 x i32> <i32 2, i32 0>
-  store <2 x i64> %out, <2 x i64> *%dest, align 8
+  store <2 x i64> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn64_b1(<2 x i64> %src1, <2 x i64> %src2, <2 x i64> *%dest) {
+define arm_aapcs_vfpcc void @vmovn64_b1(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn64_b1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.f32 s2, s6
@@ -142,11 +142,11 @@ define arm_aapcs_vfpcc void @vmovn64_b1(<2 x i64> %src1, <2 x i64> %src2, <2 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <2 x i64> %src1, <2 x i64> %src2, <2 x i32> <i32 0, i32 3>
-  store <2 x i64> %out, <2 x i64> *%dest, align 8
+  store <2 x i64> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn64_b2(<2 x i64> %src1, <2 x i64> %src2, <2 x i64> *%dest) {
+define arm_aapcs_vfpcc void @vmovn64_b2(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn64_b2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.f32 s4, s6
@@ -157,11 +157,11 @@ define arm_aapcs_vfpcc void @vmovn64_b2(<2 x i64> %src1, <2 x i64> %src2, <2 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <2 x i64> %src1, <2 x i64> %src2, <2 x i32> <i32 3, i32 0>
-  store <2 x i64> %out, <2 x i64> *%dest, align 8
+  store <2 x i64> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn64_b3(<2 x i64> %src1, <2 x i64> %src2, <2 x i64> *%dest) {
+define arm_aapcs_vfpcc void @vmovn64_b3(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn64_b3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.f32 s0, s2
@@ -172,11 +172,11 @@ define arm_aapcs_vfpcc void @vmovn64_b3(<2 x i64> %src1, <2 x i64> %src2, <2 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <2 x i64> %src1, <2 x i64> %src2, <2 x i32> <i32 1, i32 2>
-  store <2 x i64> %out, <2 x i64> *%dest, align 8
+  store <2 x i64> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn64_b4(<2 x i64> %src1, <2 x i64> %src2, <2 x i64> *%dest) {
+define arm_aapcs_vfpcc void @vmovn64_b4(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn64_b4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.f32 s6, s2
@@ -185,13 +185,13 @@ define arm_aapcs_vfpcc void @vmovn64_b4(<2 x i64> %src1, <2 x i64> %src2, <2 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <2 x i64> %src1, <2 x i64> %src2, <2 x i32> <i32 2, i32 1>
-  store <2 x i64> %out, <2 x i64> *%dest, align 8
+  store <2 x i64> %out, ptr %dest, align 8
   ret void
 }
 
 
 
-define arm_aapcs_vfpcc void @vmovn32_t1(<4 x i32> %src1, <4 x i32> %src2, <4 x i32> *%dest) {
+define arm_aapcs_vfpcc void @vmovn32_t1(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn32_t1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.f32 s1, s4
@@ -200,11 +200,11 @@ define arm_aapcs_vfpcc void @vmovn32_t1(<4 x i32> %src1, <4 x i32> %src2, <4 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <4 x i32> %src1, <4 x i32> %src2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
-  store <4 x i32> %out, <4 x i32> *%dest, align 8
+  store <4 x i32> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn32_t2(<4 x i32> %src1, <4 x i32> %src2, <4 x i32> *%dest) {
+define arm_aapcs_vfpcc void @vmovn32_t2(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn32_t2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.f32 s5, s0
@@ -213,11 +213,11 @@ define arm_aapcs_vfpcc void @vmovn32_t2(<4 x i32> %src1, <4 x i32> %src2, <4 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <4 x i32> %src1, <4 x i32> %src2, <4 x i32> <i32 4, i32 0, i32 6, i32 2>
-  store <4 x i32> %out, <4 x i32> *%dest, align 8
+  store <4 x i32> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn32_b1(<4 x i32> %src1, <4 x i32> %src2, <4 x i32> *%dest) {
+define arm_aapcs_vfpcc void @vmovn32_b1(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn32_b1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.f32 s1, s5
@@ -226,11 +226,11 @@ define arm_aapcs_vfpcc void @vmovn32_b1(<4 x i32> %src1, <4 x i32> %src2, <4 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <4 x i32> %src1, <4 x i32> %src2, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
-  store <4 x i32> %out, <4 x i32> *%dest, align 8
+  store <4 x i32> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn32_b2(<4 x i32> %src1, <4 x i32> %src2, <4 x i32> *%dest) {
+define arm_aapcs_vfpcc void @vmovn32_b2(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn32_b2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.f32 s4, s5
@@ -241,11 +241,11 @@ define arm_aapcs_vfpcc void @vmovn32_b2(<4 x i32> %src1, <4 x i32> %src2, <4 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <4 x i32> %src1, <4 x i32> %src2, <4 x i32> <i32 5, i32 0, i32 7, i32 2>
-  store <4 x i32> %out, <4 x i32> *%dest, align 8
+  store <4 x i32> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn32_b3(<4 x i32> %src1, <4 x i32> %src2, <4 x i32> *%dest) {
+define arm_aapcs_vfpcc void @vmovn32_b3(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn32_b3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.f32 s0, s1
@@ -256,11 +256,11 @@ define arm_aapcs_vfpcc void @vmovn32_b3(<4 x i32> %src1, <4 x i32> %src2, <4 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <4 x i32> %src1, <4 x i32> %src2, <4 x i32> <i32 1, i32 4, i32 3, i32 6>
-  store <4 x i32> %out, <4 x i32> *%dest, align 8
+  store <4 x i32> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn32_b4(<4 x i32> %src1, <4 x i32> %src2, <4 x i32> *%dest) {
+define arm_aapcs_vfpcc void @vmovn32_b4(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn32_b4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.f32 s5, s1
@@ -269,14 +269,14 @@ define arm_aapcs_vfpcc void @vmovn32_b4(<4 x i32> %src1, <4 x i32> %src2, <4 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <4 x i32> %src1, <4 x i32> %src2, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
-  store <4 x i32> %out, <4 x i32> *%dest, align 8
+  store <4 x i32> %out, ptr %dest, align 8
   ret void
 }
 
 
 
 
-define arm_aapcs_vfpcc void @vmovn16_t1(<8 x i16> %src1, <8 x i16> %src2, <8 x i16> *%dest) {
+define arm_aapcs_vfpcc void @vmovn16_t1(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn16_t1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnt.i32 q0, q1
@@ -284,11 +284,11 @@ define arm_aapcs_vfpcc void @vmovn16_t1(<8 x i16> %src1, <8 x i16> %src2, <8 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x i16> %src1, <8 x i16> %src2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
-  store <8 x i16> %out, <8 x i16> *%dest, align 8
+  store <8 x i16> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn16_t2(<8 x i16> %src1, <8 x i16> %src2, <8 x i16> *%dest) {
+define arm_aapcs_vfpcc void @vmovn16_t2(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn16_t2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnt.i32 q1, q0
@@ -296,11 +296,11 @@ define arm_aapcs_vfpcc void @vmovn16_t2(<8 x i16> %src1, <8 x i16> %src2, <8 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x i16> %src1, <8 x i16> %src2, <8 x i32> <i32 8, i32 0, i32 10, i32 2, i32 12, i32 4, i32 14, i32 6>
-  store <8 x i16> %out, <8 x i16> *%dest, align 8
+  store <8 x i16> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn16_b1(<8 x i16> %src1, <8 x i16> %src2, <8 x i16> *%dest) {
+define arm_aapcs_vfpcc void @vmovn16_b1(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn16_b1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnb.i32 q1, q0
@@ -308,11 +308,11 @@ define arm_aapcs_vfpcc void @vmovn16_b1(<8 x i16> %src1, <8 x i16> %src2, <8 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x i16> %src1, <8 x i16> %src2, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
-  store <8 x i16> %out, <8 x i16> *%dest, align 8
+  store <8 x i16> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn16_b2(<8 x i16> %src1, <8 x i16> %src2, <8 x i16> *%dest) {
+define arm_aapcs_vfpcc void @vmovn16_b2(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) {
 ; CHECK-MVE-LABEL: vmovn16_b2:
 ; CHECK-MVE:       @ %bb.0: @ %entry
 ; CHECK-MVE-NEXT:    vmov.u16 r1, q1[1]
@@ -348,11 +348,11 @@ define arm_aapcs_vfpcc void @vmovn16_b2(<8 x i16> %src1, <8 x i16> %src2, <8 x i
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x i16> %src1, <8 x i16> %src2, <8 x i32> <i32 9, i32 0, i32 11, i32 2, i32 13, i32 4, i32 15, i32 6>
-  store <8 x i16> %out, <8 x i16> *%dest, align 8
+  store <8 x i16> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn16_b3(<8 x i16> %src1, <8 x i16> %src2, <8 x i16> *%dest) {
+define arm_aapcs_vfpcc void @vmovn16_b3(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) {
 ; CHECK-MVE-LABEL: vmovn16_b3:
 ; CHECK-MVE:       @ %bb.0: @ %entry
 ; CHECK-MVE-NEXT:    vmov.u16 r1, q0[1]
@@ -388,11 +388,11 @@ define arm_aapcs_vfpcc void @vmovn16_b3(<8 x i16> %src1, <8 x i16> %src2, <8 x i
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x i16> %src1, <8 x i16> %src2, <8 x i32> <i32 1, i32 8, i32 3, i32 10, i32 5, i32 12, i32 7, i32 14>
-  store <8 x i16> %out, <8 x i16> *%dest, align 8
+  store <8 x i16> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn16_b4(<8 x i16> %src1, <8 x i16> %src2, <8 x i16> *%dest) {
+define arm_aapcs_vfpcc void @vmovn16_b4(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn16_b4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnb.i32 q0, q1
@@ -400,12 +400,12 @@ define arm_aapcs_vfpcc void @vmovn16_b4(<8 x i16> %src1, <8 x i16> %src2, <8 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x i16> %src1, <8 x i16> %src2, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
-  store <8 x i16> %out, <8 x i16> *%dest, align 8
+  store <8 x i16> %out, ptr %dest, align 8
   ret void
 }
 
 
-define arm_aapcs_vfpcc void @vmovn8_b1(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> *%dest) {
+define arm_aapcs_vfpcc void @vmovn8_b1(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn8_b1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnt.i16 q0, q1
@@ -413,11 +413,11 @@ define arm_aapcs_vfpcc void @vmovn8_b1(<16 x i8> %src1, <16 x i8> %src2, <16 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <16 x i8> %src1, <16 x i8> %src2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
-  store <16 x i8> %out, <16 x i8> *%dest, align 8
+  store <16 x i8> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn8_b2(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> *%dest) {
+define arm_aapcs_vfpcc void @vmovn8_b2(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn8_b2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnt.i16 q1, q0
@@ -425,11 +425,11 @@ define arm_aapcs_vfpcc void @vmovn8_b2(<16 x i8> %src1, <16 x i8> %src2, <16 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <16 x i8> %src1, <16 x i8> %src2, <16 x i32> <i32 16, i32 0, i32 18, i32 2, i32 20, i32 4, i32 22, i32 6, i32 24, i32 8, i32 26, i32 10, i32 28, i32 12, i32 30, i32 14>
-  store <16 x i8> %out, <16 x i8> *%dest, align 8
+  store <16 x i8> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn8_t1(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> *%dest) {
+define arm_aapcs_vfpcc void @vmovn8_t1(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn8_t1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnb.i16 q1, q0
@@ -437,11 +437,11 @@ define arm_aapcs_vfpcc void @vmovn8_t1(<16 x i8> %src1, <16 x i8> %src2, <16 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <16 x i8> %src1, <16 x i8> %src2, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
-  store <16 x i8> %out, <16 x i8> *%dest, align 8
+  store <16 x i8> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn8_t2(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> *%dest) {
+define arm_aapcs_vfpcc void @vmovn8_t2(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn8_t2:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.u8 r1, q1[1]
@@ -480,11 +480,11 @@ define arm_aapcs_vfpcc void @vmovn8_t2(<16 x i8> %src1, <16 x i8> %src2, <16 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <16 x i8> %src1, <16 x i8> %src2, <16 x i32> <i32 17, i32 0, i32 19, i32 2, i32 21, i32 4, i32 23, i32 6, i32 25, i32 8, i32 27, i32 10, i32 29, i32 12, i32 31, i32 14>
-  store <16 x i8> %out, <16 x i8> *%dest, align 8
+  store <16 x i8> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn8_t3(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> *%dest) {
+define arm_aapcs_vfpcc void @vmovn8_t3(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn8_t3:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.u8 r1, q0[1]
@@ -523,11 +523,11 @@ define arm_aapcs_vfpcc void @vmovn8_t3(<16 x i8> %src1, <16 x i8> %src2, <16 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <16 x i8> %src1, <16 x i8> %src2, <16 x i32> <i32 1, i32 16, i32 3, i32 18, i32 5, i32 20, i32 7, i32 22, i32 9, i32 24, i32 11, i32 26, i32 13, i32 28, i32 15, i32 30>
-  store <16 x i8> %out, <16 x i8> *%dest, align 8
+  store <16 x i8> %out, ptr %dest, align 8
   ret void
 }
 
-define arm_aapcs_vfpcc void @vmovn8_t4(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> *%dest) {
+define arm_aapcs_vfpcc void @vmovn8_t4(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) {
 ; CHECK-LABEL: vmovn8_t4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovnb.i16 q0, q1
@@ -535,6 +535,6 @@ define arm_aapcs_vfpcc void @vmovn8_t4(<16 x i8> %src1, <16 x i8> %src2, <16 x i
 ; CHECK-NEXT:    bx lr
 entry:
   %out = shufflevector <16 x i8> %src1, <16 x i8> %src2, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
-  store <16 x i8> %out, <16 x i8> *%dest, align 8
+  store <16 x i8> %out, ptr %dest, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-vmulh.ll b/llvm/test/CodeGen/Thumb2/mve-vmulh.ll
index 46480d47a2830..3c6533938fa1d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmulh.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmulh.ll
@@ -224,7 +224,7 @@ entry:
   ret <16 x i8> %s2
 }
 
-define void @vmulh_s8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8* noalias nocapture %z, i32 %n) {
+define void @vmulh_s8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vmulh_s8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -244,29 +244,26 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %x, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = sext <16 x i8> %wide.load to <16 x i16>
-  %3 = getelementptr inbounds i8, i8* %y, i32 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load17 = load <16 x i8>, <16 x i8>* %4, align 1
-  %5 = sext <16 x i8> %wide.load17 to <16 x i16>
-  %6 = mul nsw <16 x i16> %5, %2
-  %7 = lshr <16 x i16> %6, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-  %8 = trunc <16 x i16> %7 to <16 x i8>
-  %9 = getelementptr inbounds i8, i8* %z, i32 %index
-  %10 = bitcast i8* %9 to <16 x i8>*
-  store <16 x i8> %8, <16 x i8>* %10, align 1
+  %0 = getelementptr inbounds i8, ptr %x, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 1
+  %1 = sext <16 x i8> %wide.load to <16 x i16>
+  %2 = getelementptr inbounds i8, ptr %y, i32 %index
+  %wide.load17 = load <16 x i8>, ptr %2, align 1
+  %3 = sext <16 x i8> %wide.load17 to <16 x i16>
+  %4 = mul nsw <16 x i16> %3, %1
+  %5 = lshr <16 x i16> %4, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %6 = trunc <16 x i16> %5 to <16 x i8>
+  %7 = getelementptr inbounds i8, ptr %z, i32 %index
+  store <16 x i8> %6, ptr %7, align 1
   %index.next = add i32 %index, 16
-  %11 = icmp eq i32 %index.next, 1024
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, 1024
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vmulh_s16(i16* nocapture readonly %x, i16* nocapture readonly %y, i16* noalias nocapture %z, i32 %n) {
+define void @vmulh_s16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vmulh_s16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -286,29 +283,26 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %x, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = sext <8 x i16> %wide.load to <8 x i32>
-  %3 = getelementptr inbounds i16, i16* %y, i32 %index
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.load17 = load <8 x i16>, <8 x i16>* %4, align 2
-  %5 = sext <8 x i16> %wide.load17 to <8 x i32>
-  %6 = mul nsw <8 x i32> %5, %2
-  %7 = lshr <8 x i32> %6, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-  %8 = trunc <8 x i32> %7 to <8 x i16>
-  %9 = getelementptr inbounds i16, i16* %z, i32 %index
-  %10 = bitcast i16* %9 to <8 x i16>*
-  store <8 x i16> %8, <8 x i16>* %10, align 2
+  %0 = getelementptr inbounds i16, ptr %x, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = sext <8 x i16> %wide.load to <8 x i32>
+  %2 = getelementptr inbounds i16, ptr %y, i32 %index
+  %wide.load17 = load <8 x i16>, ptr %2, align 2
+  %3 = sext <8 x i16> %wide.load17 to <8 x i32>
+  %4 = mul nsw <8 x i32> %3, %1
+  %5 = lshr <8 x i32> %4, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %6 = trunc <8 x i32> %5 to <8 x i16>
+  %7 = getelementptr inbounds i16, ptr %z, i32 %index
+  store <8 x i16> %6, ptr %7, align 2
   %index.next = add i32 %index, 8
-  %11 = icmp eq i32 %index.next, 1024
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, 1024
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vmulh_s32(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* noalias nocapture %z, i32 %n) {
+define void @vmulh_s32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vmulh_s32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -328,29 +322,26 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = sext <4 x i32> %wide.load to <4 x i64>
-  %3 = getelementptr inbounds i32, i32* %y, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.load17 = load <4 x i32>, <4 x i32>* %4, align 4
-  %5 = sext <4 x i32> %wide.load17 to <4 x i64>
-  %6 = mul nsw <4 x i64> %5, %2
-  %7 = lshr <4 x i64> %6, <i64 32, i64 32, i64 32, i64 32>
-  %8 = trunc <4 x i64> %7 to <4 x i32>
-  %9 = getelementptr inbounds i32, i32* %z, i32 %index
-  %10 = bitcast i32* %9 to <4 x i32>*
-  store <4 x i32> %8, <4 x i32>* %10, align 4
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = sext <4 x i32> %wide.load to <4 x i64>
+  %2 = getelementptr inbounds i32, ptr %y, i32 %index
+  %wide.load17 = load <4 x i32>, ptr %2, align 4
+  %3 = sext <4 x i32> %wide.load17 to <4 x i64>
+  %4 = mul nsw <4 x i64> %3, %1
+  %5 = lshr <4 x i64> %4, <i64 32, i64 32, i64 32, i64 32>
+  %6 = trunc <4 x i64> %5 to <4 x i32>
+  %7 = getelementptr inbounds i32, ptr %z, i32 %index
+  store <4 x i32> %6, ptr %7, align 4
   %index.next = add i32 %index, 4
-  %11 = icmp eq i32 %index.next, 1024
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, 1024
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vmulh_u8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8* noalias nocapture %z, i32 %n) {
+define void @vmulh_u8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vmulh_u8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -370,29 +361,26 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %x, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = zext <16 x i8> %wide.load to <16 x i16>
-  %3 = getelementptr inbounds i8, i8* %y, i32 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load17 = load <16 x i8>, <16 x i8>* %4, align 1
-  %5 = zext <16 x i8> %wide.load17 to <16 x i16>
-  %6 = mul nuw <16 x i16> %5, %2
-  %7 = lshr <16 x i16> %6, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-  %8 = trunc <16 x i16> %7 to <16 x i8>
-  %9 = getelementptr inbounds i8, i8* %z, i32 %index
-  %10 = bitcast i8* %9 to <16 x i8>*
-  store <16 x i8> %8, <16 x i8>* %10, align 1
+  %0 = getelementptr inbounds i8, ptr %x, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 1
+  %1 = zext <16 x i8> %wide.load to <16 x i16>
+  %2 = getelementptr inbounds i8, ptr %y, i32 %index
+  %wide.load17 = load <16 x i8>, ptr %2, align 1
+  %3 = zext <16 x i8> %wide.load17 to <16 x i16>
+  %4 = mul nuw <16 x i16> %3, %1
+  %5 = lshr <16 x i16> %4, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %6 = trunc <16 x i16> %5 to <16 x i8>
+  %7 = getelementptr inbounds i8, ptr %z, i32 %index
+  store <16 x i8> %6, ptr %7, align 1
   %index.next = add i32 %index, 16
-  %11 = icmp eq i32 %index.next, 1024
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, 1024
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vmulh_u16(i16* nocapture readonly %x, i16* nocapture readonly %y, i16* noalias nocapture %z, i32 %n) {
+define void @vmulh_u16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vmulh_u16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -412,29 +400,26 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %x, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = zext <8 x i16> %wide.load to <8 x i32>
-  %3 = getelementptr inbounds i16, i16* %y, i32 %index
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.load17 = load <8 x i16>, <8 x i16>* %4, align 2
-  %5 = zext <8 x i16> %wide.load17 to <8 x i32>
-  %6 = mul nuw <8 x i32> %5, %2
-  %7 = lshr <8 x i32> %6, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-  %8 = trunc <8 x i32> %7 to <8 x i16>
-  %9 = getelementptr inbounds i16, i16* %z, i32 %index
-  %10 = bitcast i16* %9 to <8 x i16>*
-  store <8 x i16> %8, <8 x i16>* %10, align 2
+  %0 = getelementptr inbounds i16, ptr %x, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = zext <8 x i16> %wide.load to <8 x i32>
+  %2 = getelementptr inbounds i16, ptr %y, i32 %index
+  %wide.load17 = load <8 x i16>, ptr %2, align 2
+  %3 = zext <8 x i16> %wide.load17 to <8 x i32>
+  %4 = mul nuw <8 x i32> %3, %1
+  %5 = lshr <8 x i32> %4, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %6 = trunc <8 x i32> %5 to <8 x i16>
+  %7 = getelementptr inbounds i16, ptr %z, i32 %index
+  store <8 x i16> %6, ptr %7, align 2
   %index.next = add i32 %index, 8
-  %11 = icmp eq i32 %index.next, 1024
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, 1024
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vmulh_u32(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* noalias nocapture %z, i32 %n) {
+define void @vmulh_u32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vmulh_u32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -454,30 +439,27 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = zext <4 x i32> %wide.load to <4 x i64>
-  %3 = getelementptr inbounds i32, i32* %y, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.load17 = load <4 x i32>, <4 x i32>* %4, align 4
-  %5 = zext <4 x i32> %wide.load17 to <4 x i64>
-  %6 = mul nuw <4 x i64> %5, %2
-  %7 = lshr <4 x i64> %6, <i64 32, i64 32, i64 32, i64 32>
-  %8 = trunc <4 x i64> %7 to <4 x i32>
-  %9 = getelementptr inbounds i32, i32* %z, i32 %index
-  %10 = bitcast i32* %9 to <4 x i32>*
-  store <4 x i32> %8, <4 x i32>* %10, align 4
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = zext <4 x i32> %wide.load to <4 x i64>
+  %2 = getelementptr inbounds i32, ptr %y, i32 %index
+  %wide.load17 = load <4 x i32>, ptr %2, align 4
+  %3 = zext <4 x i32> %wide.load17 to <4 x i64>
+  %4 = mul nuw <4 x i64> %3, %1
+  %5 = lshr <4 x i64> %4, <i64 32, i64 32, i64 32, i64 32>
+  %6 = trunc <4 x i64> %5 to <4 x i32>
+  %7 = getelementptr inbounds i32, ptr %z, i32 %index
+  store <4 x i32> %6, ptr %7, align 4
   %index.next = add i32 %index, 4
-  %11 = icmp eq i32 %index.next, 1024
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, 1024
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
 
-define void @vmulh_s32_pred(i32* noalias nocapture %d, i32* noalias nocapture readonly %x, i32* noalias nocapture readonly %y, i32 %n) {
+define void @vmulh_s32_pred(ptr noalias nocapture %d, ptr noalias nocapture readonly %x, ptr noalias nocapture readonly %y, i32 %n) {
 ; CHECK-LABEL: vmulh_s32_pred:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -508,29 +490,26 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
-  %2 = sext <4 x i32> %wide.masked.load to <4 x i64>
-  %3 = getelementptr inbounds i32, i32* %y, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
-  %5 = sext <4 x i32> %wide.masked.load12 to <4 x i64>
-  %6 = mul nsw <4 x i64> %5, %2
-  %7 = lshr <4 x i64> %6, <i64 32, i64 32, i64 32, i64 32>
-  %8 = trunc <4 x i64> %7 to <4 x i32>
-  %9 = getelementptr inbounds i32, i32* %d, i32 %index
-  %10 = bitcast i32* %9 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %8, <4 x i32>* %10, i32 4, <4 x i1> %active.lane.mask)
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %0, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
+  %1 = sext <4 x i32> %wide.masked.load to <4 x i64>
+  %2 = getelementptr inbounds i32, ptr %y, i32 %index
+  %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %2, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
+  %3 = sext <4 x i32> %wide.masked.load12 to <4 x i64>
+  %4 = mul nsw <4 x i64> %3, %1
+  %5 = lshr <4 x i64> %4, <i64 32, i64 32, i64 32, i64 32>
+  %6 = trunc <4 x i64> %5 to <4 x i32>
+  %7 = getelementptr inbounds i32, ptr %d, i32 %index
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %6, ptr %7, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %11 = icmp eq i32 %index.next, %n.vec
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define void @vmulh_u32_pred(i32* noalias nocapture %d, i32* noalias nocapture readonly %x, i32* noalias nocapture readonly %y, i32 %n) {
+define void @vmulh_u32_pred(ptr noalias nocapture %d, ptr noalias nocapture readonly %x, ptr noalias nocapture readonly %y, i32 %n) {
 ; CHECK-LABEL: vmulh_u32_pred:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -561,29 +540,26 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
-  %2 = zext <4 x i32> %wide.masked.load to <4 x i64>
-  %3 = getelementptr inbounds i32, i32* %y, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %4, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
-  %5 = zext <4 x i32> %wide.masked.load12 to <4 x i64>
-  %6 = mul nuw <4 x i64> %5, %2
-  %7 = lshr <4 x i64> %6, <i64 32, i64 32, i64 32, i64 32>
-  %8 = trunc <4 x i64> %7 to <4 x i32>
-  %9 = getelementptr inbounds i32, i32* %d, i32 %index
-  %10 = bitcast i32* %9 to <4 x i32>*
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %8, <4 x i32>* %10, i32 4, <4 x i1> %active.lane.mask)
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %0, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
+  %1 = zext <4 x i32> %wide.masked.load to <4 x i64>
+  %2 = getelementptr inbounds i32, ptr %y, i32 %index
+  %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %2, i32 4, <4 x i1> %active.lane.mask, <4 x i32> poison)
+  %3 = zext <4 x i32> %wide.masked.load12 to <4 x i64>
+  %4 = mul nuw <4 x i64> %3, %1
+  %5 = lshr <4 x i64> %4, <i64 32, i64 32, i64 32, i64 32>
+  %6 = trunc <4 x i64> %5 to <4 x i32>
+  %7 = getelementptr inbounds i32, ptr %d, i32 %index
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %6, ptr %7, i32 4, <4 x i1> %active.lane.mask)
   %index.next = add i32 %index, 4
-  %11 = icmp eq i32 %index.next, %n.vec
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define void @vmulh_s16_pred(i16* noalias nocapture %d, i16* noalias nocapture readonly %x, i16* noalias nocapture readonly %y, i32 %n) {
+define void @vmulh_s16_pred(ptr noalias nocapture %d, ptr noalias nocapture readonly %x, ptr noalias nocapture readonly %y, i32 %n) {
 ; CHECK-LABEL: vmulh_s16_pred:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -614,29 +590,26 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %n)
-  %0 = getelementptr inbounds i16, i16* %x, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x i16> poison)
-  %2 = sext <8 x i16> %wide.masked.load to <8 x i32>
-  %3 = getelementptr inbounds i16, i16* %y, i32 %index
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %4, i32 2, <8 x i1> %active.lane.mask, <8 x i16> poison)
-  %5 = sext <8 x i16> %wide.masked.load12 to <8 x i32>
-  %6 = mul nsw <8 x i32> %5, %2
-  %7 = lshr <8 x i32> %6, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-  %8 = trunc <8 x i32> %7 to <8 x i16>
-  %9 = getelementptr inbounds i16, i16* %d, i32 %index
-  %10 = bitcast i16* %9 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %8, <8 x i16>* %10, i32 2, <8 x i1> %active.lane.mask)
+  %0 = getelementptr inbounds i16, ptr %x, i32 %index
+  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %0, i32 2, <8 x i1> %active.lane.mask, <8 x i16> poison)
+  %1 = sext <8 x i16> %wide.masked.load to <8 x i32>
+  %2 = getelementptr inbounds i16, ptr %y, i32 %index
+  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %2, i32 2, <8 x i1> %active.lane.mask, <8 x i16> poison)
+  %3 = sext <8 x i16> %wide.masked.load12 to <8 x i32>
+  %4 = mul nsw <8 x i32> %3, %1
+  %5 = lshr <8 x i32> %4, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %6 = trunc <8 x i32> %5 to <8 x i16>
+  %7 = getelementptr inbounds i16, ptr %d, i32 %index
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %6, ptr %7, i32 2, <8 x i1> %active.lane.mask)
   %index.next = add i32 %index, 8
-  %11 = icmp eq i32 %index.next, %n.vec
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define void @vmulh_u16_pred(i16* noalias nocapture %d, i16* noalias nocapture readonly %x, i16* noalias nocapture readonly %y, i32 %n) {
+define void @vmulh_u16_pred(ptr noalias nocapture %d, ptr noalias nocapture readonly %x, ptr noalias nocapture readonly %y, i32 %n) {
 ; CHECK-LABEL: vmulh_u16_pred:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -667,29 +640,26 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %n)
-  %0 = getelementptr inbounds i16, i16* %x, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x i16> poison)
-  %2 = zext <8 x i16> %wide.masked.load to <8 x i32>
-  %3 = getelementptr inbounds i16, i16* %y, i32 %index
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %4, i32 2, <8 x i1> %active.lane.mask, <8 x i16> poison)
-  %5 = zext <8 x i16> %wide.masked.load12 to <8 x i32>
-  %6 = mul nuw <8 x i32> %5, %2
-  %7 = lshr <8 x i32> %6, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-  %8 = trunc <8 x i32> %7 to <8 x i16>
-  %9 = getelementptr inbounds i16, i16* %d, i32 %index
-  %10 = bitcast i16* %9 to <8 x i16>*
-  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %8, <8 x i16>* %10, i32 2, <8 x i1> %active.lane.mask)
+  %0 = getelementptr inbounds i16, ptr %x, i32 %index
+  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %0, i32 2, <8 x i1> %active.lane.mask, <8 x i16> poison)
+  %1 = zext <8 x i16> %wide.masked.load to <8 x i32>
+  %2 = getelementptr inbounds i16, ptr %y, i32 %index
+  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %2, i32 2, <8 x i1> %active.lane.mask, <8 x i16> poison)
+  %3 = zext <8 x i16> %wide.masked.load12 to <8 x i32>
+  %4 = mul nuw <8 x i32> %3, %1
+  %5 = lshr <8 x i32> %4, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %6 = trunc <8 x i32> %5 to <8 x i16>
+  %7 = getelementptr inbounds i16, ptr %d, i32 %index
+  call void @llvm.masked.store.v8i16.p0(<8 x i16> %6, ptr %7, i32 2, <8 x i1> %active.lane.mask)
   %index.next = add i32 %index, 8
-  %11 = icmp eq i32 %index.next, %n.vec
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define void @vmulh_s8_pred(i8* noalias nocapture %d, i8* noalias nocapture readonly %x, i8* noalias nocapture readonly %y, i32 %n) {
+define void @vmulh_s8_pred(ptr noalias nocapture %d, ptr noalias nocapture readonly %x, ptr noalias nocapture readonly %y, i32 %n) {
 ; CHECK-LABEL: vmulh_s8_pred:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -720,29 +690,26 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %n)
-  %0 = getelementptr inbounds i8, i8* %x, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> poison)
-  %2 = sext <16 x i8> %wide.masked.load to <16 x i16>
-  %3 = getelementptr inbounds i8, i8* %y, i32 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.masked.load12 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %4, i32 1, <16 x i1> %active.lane.mask, <16 x i8> poison)
-  %5 = sext <16 x i8> %wide.masked.load12 to <16 x i16>
-  %6 = mul nsw <16 x i16> %5, %2
-  %7 = lshr <16 x i16> %6, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-  %8 = trunc <16 x i16> %7 to <16 x i8>
-  %9 = getelementptr inbounds i8, i8* %d, i32 %index
-  %10 = bitcast i8* %9 to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %8, <16 x i8>* %10, i32 1, <16 x i1> %active.lane.mask)
+  %0 = getelementptr inbounds i8, ptr %x, i32 %index
+  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %0, i32 1, <16 x i1> %active.lane.mask, <16 x i8> poison)
+  %1 = sext <16 x i8> %wide.masked.load to <16 x i16>
+  %2 = getelementptr inbounds i8, ptr %y, i32 %index
+  %wide.masked.load12 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %2, i32 1, <16 x i1> %active.lane.mask, <16 x i8> poison)
+  %3 = sext <16 x i8> %wide.masked.load12 to <16 x i16>
+  %4 = mul nsw <16 x i16> %3, %1
+  %5 = lshr <16 x i16> %4, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %6 = trunc <16 x i16> %5 to <16 x i8>
+  %7 = getelementptr inbounds i8, ptr %d, i32 %index
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %6, ptr %7, i32 1, <16 x i1> %active.lane.mask)
   %index.next = add i32 %index, 16
-  %11 = icmp eq i32 %index.next, %n.vec
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define void @vmulh_u8_pred(i8* noalias nocapture %d, i8* noalias nocapture readonly %x, i8* noalias nocapture readonly %y, i32 %n) {
+define void @vmulh_u8_pred(ptr noalias nocapture %d, ptr noalias nocapture readonly %x, ptr noalias nocapture readonly %y, i32 %n) {
 ; CHECK-LABEL: vmulh_u8_pred:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -773,34 +740,31 @@ vector.ph:                                        ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %n)
-  %0 = getelementptr inbounds i8, i8* %x, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> poison)
-  %2 = zext <16 x i8> %wide.masked.load to <16 x i16>
-  %3 = getelementptr inbounds i8, i8* %y, i32 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.masked.load12 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %4, i32 1, <16 x i1> %active.lane.mask, <16 x i8> poison)
-  %5 = zext <16 x i8> %wide.masked.load12 to <16 x i16>
-  %6 = mul nuw <16 x i16> %5, %2
-  %7 = lshr <16 x i16> %6, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-  %8 = trunc <16 x i16> %7 to <16 x i8>
-  %9 = getelementptr inbounds i8, i8* %d, i32 %index
-  %10 = bitcast i8* %9 to <16 x i8>*
-  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %8, <16 x i8>* %10, i32 1, <16 x i1> %active.lane.mask)
+  %0 = getelementptr inbounds i8, ptr %x, i32 %index
+  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %0, i32 1, <16 x i1> %active.lane.mask, <16 x i8> poison)
+  %1 = zext <16 x i8> %wide.masked.load to <16 x i16>
+  %2 = getelementptr inbounds i8, ptr %y, i32 %index
+  %wide.masked.load12 = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %2, i32 1, <16 x i1> %active.lane.mask, <16 x i8> poison)
+  %3 = zext <16 x i8> %wide.masked.load12 to <16 x i16>
+  %4 = mul nuw <16 x i16> %3, %1
+  %5 = lshr <16 x i16> %4, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %6 = trunc <16 x i16> %5 to <16 x i8>
+  %7 = getelementptr inbounds i8, ptr %d, i32 %index
+  call void @llvm.masked.store.v16i8.p0(<16 x i8> %6, ptr %7, i32 1, <16 x i1> %active.lane.mask)
   %index.next = add i32 %index, 16
-  %11 = icmp eq i32 %index.next, %n.vec
-  br i1 %11, label %for.cond.cleanup, label %vector.body
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
 declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
 declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
-declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
-declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>)
+declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>)
 declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
-declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>)
-declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32 immarg, <16 x i1>)
+declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32 immarg, <16 x i1>, <16 x i8>)
+declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32 immarg, <16 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-vmull-loop.ll b/llvm/test/CodeGen/Thumb2/mve-vmull-loop.ll
index c59fbeaeb46d3..da0cd57d86dbb 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmull-loop.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmull-loop.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
 
-define arm_aapcs_vfpcc void @test32(i32* noalias nocapture readonly %x, i32* noalias nocapture readonly %y, i32* nocapture %z, i32 %n) {
+define arm_aapcs_vfpcc void @test32(ptr noalias nocapture readonly %x, ptr noalias nocapture readonly %y, ptr nocapture %z, i32 %n) {
 ; CHECK-LABEL: test32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -37,38 +37,35 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ %index.next, %vector.body ], [ 0, %entry ]
-  %1 = getelementptr inbounds i32, i32* %x, i32 %index
-  %2 = bitcast i32* %1 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %2, align 4
-  %3 = shufflevector <4 x i32> %wide.load, <4 x i32> %wide.load, <2 x i32> <i32 0, i32 2>
-  %4 = shufflevector <4 x i32> %wide.load, <4 x i32> %wide.load, <2 x i32> <i32 1, i32 3>
+  %1 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.load = load <4 x i32>, ptr %1, align 4
+  %2 = shufflevector <4 x i32> %wide.load, <4 x i32> %wide.load, <2 x i32> <i32 0, i32 2>
+  %3 = shufflevector <4 x i32> %wide.load, <4 x i32> %wide.load, <2 x i32> <i32 1, i32 3>
+  %4 = sext <2 x i32> %2 to <2 x i64>
   %5 = sext <2 x i32> %3 to <2 x i64>
-  %6 = sext <2 x i32> %4 to <2 x i64>
-  %7 = getelementptr inbounds i32, i32* %y, i32 %index
-  %8 = bitcast i32* %7 to <4 x i32>*
-  %wide.load15 = load <4 x i32>, <4 x i32>* %8, align 4
-  %9 = shufflevector <4 x i32> %wide.load15, <4 x i32> %wide.load15, <2 x i32> <i32 0, i32 2>
-  %10 = shufflevector <4 x i32> %wide.load15, <4 x i32> %wide.load15, <2 x i32> <i32 1, i32 3>
-  %11 = sext <2 x i32> %9 to <2 x i64>
-  %12 = sext <2 x i32> %10 to <2 x i64>
-  %13 = mul <2 x i64> %11, %5
-  %14 = mul <2 x i64> %12, %6
-  %15 = lshr <2 x i64> %13, <i64 31, i64 31>
-  %16 = lshr <2 x i64> %14, <i64 31, i64 31>
-  %17 = shufflevector <2 x i64> %15, <2 x i64> %16, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-  %18 = trunc <4 x i64> %17 to <4 x i32>
-  %19 = getelementptr inbounds i32, i32* %z, i32 %index
-  %20 = bitcast i32* %19 to <4 x i32>*
-  store <4 x i32> %18, <4 x i32>* %20, align 4
+  %6 = getelementptr inbounds i32, ptr %y, i32 %index
+  %wide.load15 = load <4 x i32>, ptr %6, align 4
+  %7 = shufflevector <4 x i32> %wide.load15, <4 x i32> %wide.load15, <2 x i32> <i32 0, i32 2>
+  %8 = shufflevector <4 x i32> %wide.load15, <4 x i32> %wide.load15, <2 x i32> <i32 1, i32 3>
+  %9 = sext <2 x i32> %7 to <2 x i64>
+  %10 = sext <2 x i32> %8 to <2 x i64>
+  %11 = mul <2 x i64> %9, %4
+  %12 = mul <2 x i64> %10, %5
+  %13 = lshr <2 x i64> %11, <i64 31, i64 31>
+  %14 = lshr <2 x i64> %12, <i64 31, i64 31>
+  %15 = shufflevector <2 x i64> %13, <2 x i64> %14, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  %16 = trunc <4 x i64> %15 to <4 x i32>
+  %17 = getelementptr inbounds i32, ptr %z, i32 %index
+  store <4 x i32> %16, ptr %17, align 4
   %index.next = add i32 %index, 4
-  %21 = icmp eq i32 %index.next, %n
-  br i1 %21, label %for.cond.cleanup, label %vector.body
+  %18 = icmp eq i32 %index.next, %n
+  br i1 %18, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test16(i16* noalias nocapture readonly %x, i16* noalias nocapture readonly %y, i16* nocapture %z, i32 %n) {
+define arm_aapcs_vfpcc void @test16(ptr noalias nocapture readonly %x, ptr noalias nocapture readonly %y, ptr nocapture %z, i32 %n) {
 ; CHECK-LABEL: test16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -96,38 +93,35 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ %index.next, %vector.body ], [ 0, %entry ]
-  %1 = getelementptr inbounds i16, i16* %x, i32 %index
-  %2 = bitcast i16* %1 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %2, align 2
-  %3 = shufflevector <8 x i16> %wide.load, <8 x i16> %wide.load, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %4 = shufflevector <8 x i16> %wide.load, <8 x i16> %wide.load, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %1 = getelementptr inbounds i16, ptr %x, i32 %index
+  %wide.load = load <8 x i16>, ptr %1, align 2
+  %2 = shufflevector <8 x i16> %wide.load, <8 x i16> %wide.load, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %3 = shufflevector <8 x i16> %wide.load, <8 x i16> %wide.load, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %4 = sext <4 x i16> %2 to <4 x i32>
   %5 = sext <4 x i16> %3 to <4 x i32>
-  %6 = sext <4 x i16> %4 to <4 x i32>
-  %7 = getelementptr inbounds i16, i16* %y, i32 %index
-  %8 = bitcast i16* %7 to <8 x i16>*
-  %wide.load15 = load <8 x i16>, <8 x i16>* %8, align 2
-  %9 = shufflevector <8 x i16> %wide.load15, <8 x i16> %wide.load15, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %10 = shufflevector <8 x i16> %wide.load15, <8 x i16> %wide.load15, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-  %11 = sext <4 x i16> %9 to <4 x i32>
-  %12 = sext <4 x i16> %10 to <4 x i32>
-  %13 = mul <4 x i32> %11, %5
-  %14 = mul <4 x i32> %12, %6
-  %15 = lshr <4 x i32> %13, <i32 15, i32 15, i32 15, i32 15>
-  %16 = lshr <4 x i32> %14, <i32 15, i32 15, i32 15, i32 15>
-  %17 = shufflevector <4 x i32> %15, <4 x i32> %16, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  %18 = trunc <8 x i32> %17 to <8 x i16>
-  %19 = getelementptr inbounds i16, i16* %z, i32 %index
-  %20 = bitcast i16* %19 to <8 x i16>*
-  store <8 x i16> %18, <8 x i16>* %20, align 2
+  %6 = getelementptr inbounds i16, ptr %y, i32 %index
+  %wide.load15 = load <8 x i16>, ptr %6, align 2
+  %7 = shufflevector <8 x i16> %wide.load15, <8 x i16> %wide.load15, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %8 = shufflevector <8 x i16> %wide.load15, <8 x i16> %wide.load15, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %9 = sext <4 x i16> %7 to <4 x i32>
+  %10 = sext <4 x i16> %8 to <4 x i32>
+  %11 = mul <4 x i32> %9, %4
+  %12 = mul <4 x i32> %10, %5
+  %13 = lshr <4 x i32> %11, <i32 15, i32 15, i32 15, i32 15>
+  %14 = lshr <4 x i32> %12, <i32 15, i32 15, i32 15, i32 15>
+  %15 = shufflevector <4 x i32> %13, <4 x i32> %14, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  %16 = trunc <8 x i32> %15 to <8 x i16>
+  %17 = getelementptr inbounds i16, ptr %z, i32 %index
+  store <8 x i16> %16, ptr %17, align 2
   %index.next = add i32 %index, 8
-  %21 = icmp eq i32 %index.next, %n
-  br i1 %21, label %for.cond.cleanup, label %vector.body
+  %18 = icmp eq i32 %index.next, %n
+  br i1 %18, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void
 }
 
-define arm_aapcs_vfpcc void @test8(i8* noalias nocapture readonly %x, i8* noalias nocapture readonly %y, i8* nocapture %z, i32 %n) {
+define arm_aapcs_vfpcc void @test8(ptr noalias nocapture readonly %x, ptr noalias nocapture readonly %y, ptr nocapture %z, i32 %n) {
 ; CHECK-LABEL: test8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    cmp r3, #1
@@ -155,32 +149,29 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ %index.next, %vector.body ], [ 0, %entry ]
-  %1 = getelementptr inbounds i8, i8* %x, i32 %index
-  %2 = bitcast i8* %1 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %2, align 1
-  %3 = shufflevector <16 x i8> %wide.load, <16 x i8> %wide.load, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %4 = shufflevector <16 x i8> %wide.load, <16 x i8> %wide.load, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %1 = getelementptr inbounds i8, ptr %x, i32 %index
+  %wide.load = load <16 x i8>, ptr %1, align 1
+  %2 = shufflevector <16 x i8> %wide.load, <16 x i8> %wide.load, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %3 = shufflevector <16 x i8> %wide.load, <16 x i8> %wide.load, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %4 = zext <8 x i8> %2 to <8 x i16>
   %5 = zext <8 x i8> %3 to <8 x i16>
-  %6 = zext <8 x i8> %4 to <8 x i16>
-  %7 = getelementptr inbounds i8, i8* %y, i32 %index
-  %8 = bitcast i8* %7 to <16 x i8>*
-  %wide.load19 = load <16 x i8>, <16 x i8>* %8, align 1
-  %9 = shufflevector <16 x i8> %wide.load19, <16 x i8> %wide.load19, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %10 = shufflevector <16 x i8> %wide.load19, <16 x i8> %wide.load19, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-  %11 = zext <8 x i8> %9 to <8 x i16>
-  %12 = zext <8 x i8> %10 to <8 x i16>
-  %13 = mul <8 x i16> %11, %5
-  %14 = mul <8 x i16> %12, %6
-  %15 = lshr <8 x i16> %13, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
-  %16 = lshr <8 x i16> %14, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
-  %17 = shufflevector <8 x i16> %15, <8 x i16> %16, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-  %18 = trunc <16 x i16> %17 to <16 x i8>
-  %19 = getelementptr inbounds i8, i8* %z, i32 %index
-  %20 = bitcast i8* %19 to <16 x i8>*
-  store <16 x i8> %18, <16 x i8>* %20, align 1
+  %6 = getelementptr inbounds i8, ptr %y, i32 %index
+  %wide.load19 = load <16 x i8>, ptr %6, align 1
+  %7 = shufflevector <16 x i8> %wide.load19, <16 x i8> %wide.load19, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %8 = shufflevector <16 x i8> %wide.load19, <16 x i8> %wide.load19, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %9 = zext <8 x i8> %7 to <8 x i16>
+  %10 = zext <8 x i8> %8 to <8 x i16>
+  %11 = mul <8 x i16> %9, %4
+  %12 = mul <8 x i16> %10, %5
+  %13 = lshr <8 x i16> %11, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  %14 = lshr <8 x i16> %12, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  %15 = shufflevector <8 x i16> %13, <8 x i16> %14, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  %16 = trunc <16 x i16> %15 to <16 x i8>
+  %17 = getelementptr inbounds i8, ptr %z, i32 %index
+  store <16 x i8> %16, ptr %17, align 1
   %index.next = add i32 %index, 16
-  %21 = icmp eq i32 %index.next, %n
-  br i1 %21, label %for.cond.cleanup, label %vector.body
+  %18 = icmp eq i32 %index.next, %n
+  br i1 %18, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body, %entry
   ret void

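The hunks above show the mechanical shape of the whole conversion: under opaque pointers the intermediate bitcast between a typed element pointer and a typed vector pointer disappears, and a zero-offset getelementptr of the loaded type becomes a no-op and is dropped. Because instructions are deleted, every later anonymous value number in the function shifts down (hence %21 becoming %18 above). A minimal before/after sketch with illustrative names (%p and %i are not from this commit):

  ; typed pointers: GEP, bitcast, then load
  %g = getelementptr inbounds i16, i16* %p, i32 %i
  %c = bitcast i16* %g to <8 x i16>*
  %v = load <8 x i16>, <8 x i16>* %c, align 2

  ; opaque pointers: the bitcast folds away
  %g = getelementptr inbounds i16, ptr %p, i32 %i
  %v = load <8 x i16>, ptr %g, align 2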
diff --git a/llvm/test/CodeGen/Thumb2/mve-vqdmulh-minmax.ll b/llvm/test/CodeGen/Thumb2/mve-vqdmulh-minmax.ll
index a1b5e6f2ec897..cc6fcaef319dd 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vqdmulh-minmax.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vqdmulh-minmax.ll
@@ -349,7 +349,7 @@ entry:
 
 
 
-define void @vqdmulh_loop_i8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8* noalias nocapture %z, i32 %n) local_unnamed_addr #0 {
+define void @vqdmulh_loop_i8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) local_unnamed_addr #0 {
 ; CHECK-LABEL: vqdmulh_loop_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -369,31 +369,28 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %x, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = sext <16 x i8> %wide.load to <16 x i32>
-  %3 = getelementptr inbounds i8, i8* %y, i32 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load26 = load <16 x i8>, <16 x i8>* %4, align 1
-  %5 = sext <16 x i8> %wide.load26 to <16 x i32>
-  %6 = mul nsw <16 x i32> %5, %2
-  %7 = ashr <16 x i32> %6, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
-  %8 = icmp slt <16 x i32> %7, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
-  %9 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> %7, <16 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>)
-  %10 = trunc <16 x i32> %9 to <16 x i8>
-  %11 = getelementptr inbounds i8, i8* %z, i32 %index
-  %12 = bitcast i8* %11 to <16 x i8>*
-  store <16 x i8> %10, <16 x i8>* %12, align 1
+  %0 = getelementptr inbounds i8, ptr %x, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 1
+  %1 = sext <16 x i8> %wide.load to <16 x i32>
+  %2 = getelementptr inbounds i8, ptr %y, i32 %index
+  %wide.load26 = load <16 x i8>, ptr %2, align 1
+  %3 = sext <16 x i8> %wide.load26 to <16 x i32>
+  %4 = mul nsw <16 x i32> %3, %1
+  %5 = ashr <16 x i32> %4, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  %6 = icmp slt <16 x i32> %5, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+  %7 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> %5, <16 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>)
+  %8 = trunc <16 x i32> %7 to <16 x i8>
+  %9 = getelementptr inbounds i8, ptr %z, i32 %index
+  store <16 x i8> %8, ptr %9, align 1
   %index.next = add i32 %index, 16
-  %13 = icmp eq i32 %index.next, 1024
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vqdmulh_loop_i16(i16* nocapture readonly %x, i16* nocapture readonly %y, i16* noalias nocapture %z, i32 %n) {
+define void @vqdmulh_loop_i16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vqdmulh_loop_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -413,31 +410,28 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %x, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = sext <8 x i16> %wide.load to <8 x i32>
-  %3 = getelementptr inbounds i16, i16* %y, i32 %index
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.load30 = load <8 x i16>, <8 x i16>* %4, align 2
-  %5 = sext <8 x i16> %wide.load30 to <8 x i32>
-  %6 = mul nsw <8 x i32> %5, %2
-  %7 = ashr <8 x i32> %6, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
-  %8 = icmp slt <8 x i32> %7, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
-  %9 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %7, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>)
-  %10 = trunc <8 x i32> %9 to <8 x i16>
-  %11 = getelementptr inbounds i16, i16* %z, i32 %index
-  %12 = bitcast i16* %11 to <8 x i16>*
-  store <8 x i16> %10, <8 x i16>* %12, align 2
+  %0 = getelementptr inbounds i16, ptr %x, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = sext <8 x i16> %wide.load to <8 x i32>
+  %2 = getelementptr inbounds i16, ptr %y, i32 %index
+  %wide.load30 = load <8 x i16>, ptr %2, align 2
+  %3 = sext <8 x i16> %wide.load30 to <8 x i32>
+  %4 = mul nsw <8 x i32> %3, %1
+  %5 = ashr <8 x i32> %4, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %6 = icmp slt <8 x i32> %5, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+  %7 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %5, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>)
+  %8 = trunc <8 x i32> %7 to <8 x i16>
+  %9 = getelementptr inbounds i16, ptr %z, i32 %index
+  store <8 x i16> %8, ptr %9, align 2
   %index.next = add i32 %index, 8
-  %13 = icmp eq i32 %index.next, 1024
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vqdmulh_loop_i32(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* noalias nocapture %z, i32 %n) {
+define void @vqdmulh_loop_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vqdmulh_loop_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -457,25 +451,22 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = sext <4 x i32> %wide.load to <4 x i64>
-  %3 = getelementptr inbounds i32, i32* %y, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.load30 = load <4 x i32>, <4 x i32>* %4, align 4
-  %5 = sext <4 x i32> %wide.load30 to <4 x i64>
-  %6 = mul nsw <4 x i64> %5, %2
-  %7 = ashr <4 x i64> %6, <i64 31, i64 31, i64 31, i64 31>
-  %8 = icmp slt <4 x i64> %7, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
-  %9 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %7, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
-  %10 = trunc <4 x i64> %9 to <4 x i32>
-  %11 = getelementptr inbounds i32, i32* %z, i32 %index
-  %12 = bitcast i32* %11 to <4 x i32>*
-  store <4 x i32> %10, <4 x i32>* %12, align 4
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = sext <4 x i32> %wide.load to <4 x i64>
+  %2 = getelementptr inbounds i32, ptr %y, i32 %index
+  %wide.load30 = load <4 x i32>, ptr %2, align 4
+  %3 = sext <4 x i32> %wide.load30 to <4 x i64>
+  %4 = mul nsw <4 x i64> %3, %1
+  %5 = ashr <4 x i64> %4, <i64 31, i64 31, i64 31, i64 31>
+  %6 = icmp slt <4 x i64> %5, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+  %7 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %5, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
+  %8 = trunc <4 x i64> %7 to <4 x i32>
+  %9 = getelementptr inbounds i32, ptr %z, i32 %index
+  store <4 x i32> %8, ptr %9, align 4
   %index.next = add i32 %index, 4
-  %13 = icmp eq i32 %index.next, 1024
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void

diff --git a/llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll b/llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll
index ebec3fe454a96..22a96f1ff419d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll
@@ -367,7 +367,7 @@ entry:
 
 
 
-define void @vqdmulh_loop_i8(i8* nocapture readonly %x, i8* nocapture readonly %y, i8* noalias nocapture %z, i32 %n) local_unnamed_addr #0 {
+define void @vqdmulh_loop_i8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) local_unnamed_addr #0 {
 ; CHECK-LABEL: vqdmulh_loop_i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -387,31 +387,28 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %x, i32 %index
-  %1 = bitcast i8* %0 to <16 x i8>*
-  %wide.load = load <16 x i8>, <16 x i8>* %1, align 1
-  %2 = sext <16 x i8> %wide.load to <16 x i32>
-  %3 = getelementptr inbounds i8, i8* %y, i32 %index
-  %4 = bitcast i8* %3 to <16 x i8>*
-  %wide.load26 = load <16 x i8>, <16 x i8>* %4, align 1
-  %5 = sext <16 x i8> %wide.load26 to <16 x i32>
-  %6 = mul nsw <16 x i32> %5, %2
-  %7 = ashr <16 x i32> %6, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
-  %8 = icmp slt <16 x i32> %7, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
-  %9 = select <16 x i1> %8, <16 x i32> %7, <16 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
-  %10 = trunc <16 x i32> %9 to <16 x i8>
-  %11 = getelementptr inbounds i8, i8* %z, i32 %index
-  %12 = bitcast i8* %11 to <16 x i8>*
-  store <16 x i8> %10, <16 x i8>* %12, align 1
+  %0 = getelementptr inbounds i8, ptr %x, i32 %index
+  %wide.load = load <16 x i8>, ptr %0, align 1
+  %1 = sext <16 x i8> %wide.load to <16 x i32>
+  %2 = getelementptr inbounds i8, ptr %y, i32 %index
+  %wide.load26 = load <16 x i8>, ptr %2, align 1
+  %3 = sext <16 x i8> %wide.load26 to <16 x i32>
+  %4 = mul nsw <16 x i32> %3, %1
+  %5 = ashr <16 x i32> %4, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  %6 = icmp slt <16 x i32> %5, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+  %7 = select <16 x i1> %6, <16 x i32> %5, <16 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+  %8 = trunc <16 x i32> %7 to <16 x i8>
+  %9 = getelementptr inbounds i8, ptr %z, i32 %index
+  store <16 x i8> %8, ptr %9, align 1
   %index.next = add i32 %index, 16
-  %13 = icmp eq i32 %index.next, 1024
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vqdmulh_loop_i16(i16* nocapture readonly %x, i16* nocapture readonly %y, i16* noalias nocapture %z, i32 %n) {
+define void @vqdmulh_loop_i16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vqdmulh_loop_i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -431,31 +428,28 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %x, i32 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = sext <8 x i16> %wide.load to <8 x i32>
-  %3 = getelementptr inbounds i16, i16* %y, i32 %index
-  %4 = bitcast i16* %3 to <8 x i16>*
-  %wide.load30 = load <8 x i16>, <8 x i16>* %4, align 2
-  %5 = sext <8 x i16> %wide.load30 to <8 x i32>
-  %6 = mul nsw <8 x i32> %5, %2
-  %7 = ashr <8 x i32> %6, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
-  %8 = icmp slt <8 x i32> %7, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
-  %9 = select <8 x i1> %8, <8 x i32> %7, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
-  %10 = trunc <8 x i32> %9 to <8 x i16>
-  %11 = getelementptr inbounds i16, i16* %z, i32 %index
-  %12 = bitcast i16* %11 to <8 x i16>*
-  store <8 x i16> %10, <8 x i16>* %12, align 2
+  %0 = getelementptr inbounds i16, ptr %x, i32 %index
+  %wide.load = load <8 x i16>, ptr %0, align 2
+  %1 = sext <8 x i16> %wide.load to <8 x i32>
+  %2 = getelementptr inbounds i16, ptr %y, i32 %index
+  %wide.load30 = load <8 x i16>, ptr %2, align 2
+  %3 = sext <8 x i16> %wide.load30 to <8 x i32>
+  %4 = mul nsw <8 x i32> %3, %1
+  %5 = ashr <8 x i32> %4, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %6 = icmp slt <8 x i32> %5, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+  %7 = select <8 x i1> %6, <8 x i32> %5, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+  %8 = trunc <8 x i32> %7 to <8 x i16>
+  %9 = getelementptr inbounds i16, ptr %z, i32 %index
+  store <8 x i16> %8, ptr %9, align 2
   %index.next = add i32 %index, 8
-  %13 = icmp eq i32 %index.next, 1024
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void
 }
 
-define void @vqdmulh_loop_i32(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* noalias nocapture %z, i32 %n) {
+define void @vqdmulh_loop_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
 ; CHECK-LABEL: vqdmulh_loop_i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -475,25 +469,22 @@ entry:
 
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %x, i32 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = sext <4 x i32> %wide.load to <4 x i64>
-  %3 = getelementptr inbounds i32, i32* %y, i32 %index
-  %4 = bitcast i32* %3 to <4 x i32>*
-  %wide.load30 = load <4 x i32>, <4 x i32>* %4, align 4
-  %5 = sext <4 x i32> %wide.load30 to <4 x i64>
-  %6 = mul nsw <4 x i64> %5, %2
-  %7 = ashr <4 x i64> %6, <i64 31, i64 31, i64 31, i64 31>
-  %8 = icmp slt <4 x i64> %7, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
-  %9 = select <4 x i1> %8, <4 x i64> %7, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
-  %10 = trunc <4 x i64> %9 to <4 x i32>
-  %11 = getelementptr inbounds i32, i32* %z, i32 %index
-  %12 = bitcast i32* %11 to <4 x i32>*
-  store <4 x i32> %10, <4 x i32>* %12, align 4
+  %0 = getelementptr inbounds i32, ptr %x, i32 %index
+  %wide.load = load <4 x i32>, ptr %0, align 4
+  %1 = sext <4 x i32> %wide.load to <4 x i64>
+  %2 = getelementptr inbounds i32, ptr %y, i32 %index
+  %wide.load30 = load <4 x i32>, ptr %2, align 4
+  %3 = sext <4 x i32> %wide.load30 to <4 x i64>
+  %4 = mul nsw <4 x i64> %3, %1
+  %5 = ashr <4 x i64> %4, <i64 31, i64 31, i64 31, i64 31>
+  %6 = icmp slt <4 x i64> %5, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+  %7 = select <4 x i1> %6, <4 x i64> %5, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+  %8 = trunc <4 x i64> %7 to <4 x i32>
+  %9 = getelementptr inbounds i32, ptr %z, i32 %index
+  store <4 x i32> %8, ptr %9, align 4
   %index.next = add i32 %index, 4
-  %13 = icmp eq i32 %index.next, 1024
-  br i1 %13, label %for.cond.cleanup, label %vector.body
+  %10 = icmp eq i32 %index.next, 1024
+  br i1 %10, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:                                 ; preds = %vector.body
   ret void

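The two files above exercise the same saturating pattern in the two forms the optimizer can produce: mve-vqdmulh-minmax.ll clamps the shifted product with @llvm.smin.*, while mve-vqdmulh.ll spells the clamp as icmp slt plus select. The forms are equivalent (instcombine normally canonicalises the select form to the intrinsic), and both are presumably meant to let the backend recognise the MVE vqdmulh idiom. A sketch of the equivalence at <4 x i32> width (names illustrative, not from this commit):

  declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)

  ; select form of the clamp to the signed i16 maximum
  %c  = icmp slt <4 x i32> %sh, <i32 32767, i32 32767, i32 32767, i32 32767>
  %r0 = select <4 x i1> %c, <4 x i32> %sh, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>

  ; intrinsic form of the same clamp
  %r1 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %sh, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>)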
diff --git a/llvm/test/CodeGen/Thumb2/mve-vst2-post.ll b/llvm/test/CodeGen/Thumb2/mve-vst2-post.ll
index d482feef98990..3b318124043f2 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vst2-post.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vst2-post.ll
@@ -3,7 +3,7 @@
 
 ; i32
 
-define <8 x i32> *@vst2_v4i32(<4 x i32> *%src, <8 x i32> *%dst) {
+define ptr @vst2_v4i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
@@ -13,19 +13,18 @@ define <8 x i32> *@vst2_v4i32(<4 x i32> *%src, <8 x i32> *%dst) {
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x i32>, <4 x i32>* %src, i32 0
-  %l1 = load <4 x i32>, <4 x i32>* %s1, align 4
-  %s2 = getelementptr <4 x i32>, <4 x i32>* %src, i32 1
-  %l2 = load <4 x i32>, <4 x i32>* %s2, align 4
+  %l1 = load <4 x i32>, ptr %src, align 4
+  %s2 = getelementptr <4 x i32>, ptr %src, i32 1
+  %l2 = load <4 x i32>, ptr %s2, align 4
   %s = shufflevector <4 x i32> %l1, <4 x i32> %l2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x i32> %s, <8 x i32> *%dst
-  %ret = getelementptr inbounds <8 x i32>, <8 x i32>* %dst, i32 1
-  ret <8 x i32> *%ret
+  store <8 x i32> %s, ptr %dst
+  %ret = getelementptr inbounds <8 x i32>, ptr %dst, i32 1
+  ret ptr %ret
 }
 
 ; i16
 
-define <16 x i16> *@vst2_v8i16(<8 x i16> *%src, <16 x i16> *%dst) {
+define ptr @vst2_v8i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
@@ -35,19 +34,18 @@ define <16 x i16> *@vst2_v8i16(<8 x i16> *%src, <16 x i16> *%dst) {
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x i16>, <8 x i16>* %src, i32 0
-  %l1 = load <8 x i16>, <8 x i16>* %s1, align 4
-  %s2 = getelementptr <8 x i16>, <8 x i16>* %src, i32 1
-  %l2 = load <8 x i16>, <8 x i16>* %s2, align 4
+  %l1 = load <8 x i16>, ptr %src, align 4
+  %s2 = getelementptr <8 x i16>, ptr %src, i32 1
+  %l2 = load <8 x i16>, ptr %s2, align 4
   %s = shufflevector <8 x i16> %l1, <8 x i16> %l2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-  store <16 x i16> %s, <16 x i16> *%dst
-  %ret = getelementptr inbounds <16 x i16>, <16 x i16>* %dst, i32 1
-  ret <16 x i16> *%ret
+  store <16 x i16> %s, ptr %dst
+  %ret = getelementptr inbounds <16 x i16>, ptr %dst, i32 1
+  ret ptr %ret
 }
 
 ; i8
 
-define <32 x i8> *@vst2_v16i8(<16 x i8> *%src, <32 x i8> *%dst) {
+define ptr @vst2_v16i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
@@ -57,19 +55,18 @@ define <32 x i8> *@vst2_v16i8(<16 x i8> *%src, <32 x i8> *%dst) {
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x i8>, <16 x i8>* %src, i32 0
-  %l1 = load <16 x i8>, <16 x i8>* %s1, align 4
-  %s2 = getelementptr <16 x i8>, <16 x i8>* %src, i32 1
-  %l2 = load <16 x i8>, <16 x i8>* %s2, align 4
+  %l1 = load <16 x i8>, ptr %src, align 4
+  %s2 = getelementptr <16 x i8>, ptr %src, i32 1
+  %l2 = load <16 x i8>, ptr %s2, align 4
   %s = shufflevector <16 x i8> %l1, <16 x i8> %l2, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
-  store <32 x i8> %s, <32 x i8> *%dst
-  %ret = getelementptr inbounds <32 x i8>, <32 x i8>* %dst, i32 1
-  ret <32 x i8> *%ret
+  store <32 x i8> %s, ptr %dst
+  %ret = getelementptr inbounds <32 x i8>, ptr %dst, i32 1
+  ret ptr %ret
 }
 
 ; i64
 
-define <4 x i64> *@vst2_v2i64(<2 x i64> *%src, <4 x i64> *%dst) {
+define ptr @vst2_v2i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
@@ -82,19 +79,18 @@ define <4 x i64> *@vst2_v2i64(<2 x i64> *%src, <4 x i64> *%dst) {
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x i64>, <2 x i64>* %src, i32 0
-  %l1 = load <2 x i64>, <2 x i64>* %s1, align 4
-  %s2 = getelementptr <2 x i64>, <2 x i64>* %src, i32 1
-  %l2 = load <2 x i64>, <2 x i64>* %s2, align 4
+  %l1 = load <2 x i64>, ptr %src, align 4
+  %s2 = getelementptr <2 x i64>, ptr %src, i32 1
+  %l2 = load <2 x i64>, ptr %s2, align 4
   %s = shufflevector <2 x i64> %l1, <2 x i64> %l2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-  store <4 x i64> %s, <4 x i64> *%dst
-  %ret = getelementptr inbounds <4 x i64>, <4 x i64>* %dst, i32 1
-  ret <4 x i64> *%ret
+  store <4 x i64> %s, ptr %dst
+  %ret = getelementptr inbounds <4 x i64>, ptr %dst, i32 1
+  ret ptr %ret
 }
 
 ; f32
 
-define <8 x float> *@vst2_v4f32(<4 x float> *%src, <8 x float> *%dst) {
+define ptr @vst2_v4f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
@@ -104,19 +100,18 @@ define <8 x float> *@vst2_v4f32(<4 x float> *%src, <8 x float> *%dst) {
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x float>, <4 x float>* %src, i32 0
-  %l1 = load <4 x float>, <4 x float>* %s1, align 4
-  %s2 = getelementptr <4 x float>, <4 x float>* %src, i32 1
-  %l2 = load <4 x float>, <4 x float>* %s2, align 4
+  %l1 = load <4 x float>, ptr %src, align 4
+  %s2 = getelementptr <4 x float>, ptr %src, i32 1
+  %l2 = load <4 x float>, ptr %s2, align 4
   %s = shufflevector <4 x float> %l1, <4 x float> %l2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x float> %s, <8 x float> *%dst
-  %ret = getelementptr inbounds <8 x float>, <8 x float>* %dst, i32 1
-  ret <8 x float> *%ret
+  store <8 x float> %s, ptr %dst
+  %ret = getelementptr inbounds <8 x float>, ptr %dst, i32 1
+  ret ptr %ret
 }
 
 ; f16
 
-define <16 x half> *@vst2_v8f16(<8 x half> *%src, <16 x half> *%dst) {
+define ptr @vst2_v8f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v8f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
@@ -126,19 +121,18 @@ define <16 x half> *@vst2_v8f16(<8 x half> *%src, <16 x half> *%dst) {
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x half>, <8 x half>* %src, i32 0
-  %l1 = load <8 x half>, <8 x half>* %s1, align 4
-  %s2 = getelementptr <8 x half>, <8 x half>* %src, i32 1
-  %l2 = load <8 x half>, <8 x half>* %s2, align 4
+  %l1 = load <8 x half>, ptr %src, align 4
+  %s2 = getelementptr <8 x half>, ptr %src, i32 1
+  %l2 = load <8 x half>, ptr %s2, align 4
   %s = shufflevector <8 x half> %l1, <8 x half> %l2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-  store <16 x half> %s, <16 x half> *%dst
-  %ret = getelementptr inbounds <16 x half>, <16 x half>* %dst, i32 1
-  ret <16 x half> *%ret
+  store <16 x half> %s, ptr %dst
+  %ret = getelementptr inbounds <16 x half>, ptr %dst, i32 1
+  ret ptr %ret
 }
 
 ; f64
 
-define <4 x double> *@vst2_v2f64(<2 x double> *%src, <4 x double> *%dst) {
+define ptr @vst2_v2f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v2f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
@@ -151,12 +145,11 @@ define <4 x double> *@vst2_v2f64(<2 x double> *%src, <4 x double> *%dst) {
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x double>, <2 x double>* %src, i32 0
-  %l1 = load <2 x double>, <2 x double>* %s1, align 4
-  %s2 = getelementptr <2 x double>, <2 x double>* %src, i32 1
-  %l2 = load <2 x double>, <2 x double>* %s2, align 4
+  %l1 = load <2 x double>, ptr %src, align 4
+  %s2 = getelementptr <2 x double>, ptr %src, i32 1
+  %l2 = load <2 x double>, ptr %s2, align 4
   %s = shufflevector <2 x double> %l1, <2 x double> %l2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-  store <4 x double> %s, <4 x double> *%dst
-  %ret = getelementptr inbounds <4 x double>, <4 x double>* %dst, i32 1
-  ret <4 x double> *%ret
+  store <4 x double> %s, ptr %dst
+  %ret = getelementptr inbounds <4 x double>, ptr %dst, i32 1
+  ret ptr %ret
 }

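mve-vst2-post.ll differs from the plain mve-vst2.ll tests that follow only in returning %dst advanced by one store, which checks whether the increment can be folded into a write-back (post-indexed) form of the store. The common kernel in both files is a two-way interleave: load two consecutive vectors, zip them lane by lane with a shufflevector, and store the double-width result. A sketch for the v4i32 case, with the lane order spelled out (names illustrative):

  ; %a = a0 a1 a2 a3, %b = b0 b1 b2 b3
  ; interleaved result: a0 b0 a1 b1 a2 b2 a3 b3
  %s = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  store <8 x i32> %s, ptr %dst, align 4

When the alignment allows, MVE lowers this to the vst20/vst21 instruction pair; the align1 variants instead fall back to byte stores (vstrb.8), as the CHECK lines show.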
diff --git a/llvm/test/CodeGen/Thumb2/mve-vst2.ll b/llvm/test/CodeGen/Thumb2/mve-vst2.ll
index bcddeae5c2a76..483b838035ed6 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vst2.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vst2.ll
@@ -3,7 +3,7 @@
 
 ; i32
 
-define void @vst2_v2i32(<2 x i32> *%src, <4 x i32> *%dst) {
+define void @vst2_v2i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v2i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldm.w r0, {r2, r3, r12}
@@ -13,16 +13,15 @@ define void @vst2_v2i32(<2 x i32> *%src, <4 x i32> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x i32>, <2 x i32>* %src, i32 0
-  %l1 = load <2 x i32>, <2 x i32>* %s1, align 4
-  %s2 = getelementptr <2 x i32>, <2 x i32>* %src, i32 1
-  %l2 = load <2 x i32>, <2 x i32>* %s2, align 4
+  %l1 = load <2 x i32>, ptr %src, align 4
+  %s2 = getelementptr <2 x i32>, ptr %src, i32 1
+  %l2 = load <2 x i32>, ptr %s2, align 4
   %s = shufflevector <2 x i32> %l1, <2 x i32> %l2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-  store <4 x i32> %s, <4 x i32> *%dst, align 4
+  store <4 x i32> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst2_v4i32(<4 x i32> *%src, <8 x i32> *%dst) {
+define void @vst2_v4i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
@@ -31,16 +30,15 @@ define void @vst2_v4i32(<4 x i32> *%src, <8 x i32> *%dst) {
 ; CHECK-NEXT:    vst21.32 {q0, q1}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x i32>, <4 x i32>* %src, i32 0
-  %l1 = load <4 x i32>, <4 x i32>* %s1, align 4
-  %s2 = getelementptr <4 x i32>, <4 x i32>* %src, i32 1
-  %l2 = load <4 x i32>, <4 x i32>* %s2, align 4
+  %l1 = load <4 x i32>, ptr %src, align 4
+  %s2 = getelementptr <4 x i32>, ptr %src, i32 1
+  %l2 = load <4 x i32>, ptr %s2, align 4
   %s = shufflevector <4 x i32> %l1, <4 x i32> %l2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x i32> %s, <8 x i32> *%dst, align 4
+  store <8 x i32> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst2_v8i32(<8 x i32> *%src, <16 x i32> *%dst) {
+define void @vst2_v8i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v8i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
@@ -53,16 +51,15 @@ define void @vst2_v8i32(<8 x i32> *%src, <16 x i32> *%dst) {
 ; CHECK-NEXT:    vst21.32 {q2, q3}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x i32>, <8 x i32>* %src, i32 0
-  %l1 = load <8 x i32>, <8 x i32>* %s1, align 4
-  %s2 = getelementptr <8 x i32>, <8 x i32>* %src, i32 1
-  %l2 = load <8 x i32>, <8 x i32>* %s2, align 4
+  %l1 = load <8 x i32>, ptr %src, align 4
+  %s2 = getelementptr <8 x i32>, ptr %src, i32 1
+  %l2 = load <8 x i32>, ptr %s2, align 4
   %s = shufflevector <8 x i32> %l1, <8 x i32> %l2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-  store <16 x i32> %s, <16 x i32> *%dst, align 4
+  store <16 x i32> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst2_v16i32(<16 x i32> *%src, <32 x i32> *%dst) {
+define void @vst2_v16i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v16i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -88,16 +85,15 @@ define void @vst2_v16i32(<16 x i32> *%src, <32 x i32> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x i32>, <16 x i32>* %src, i32 0
-  %l1 = load <16 x i32>, <16 x i32>* %s1, align 4
-  %s2 = getelementptr <16 x i32>, <16 x i32>* %src, i32 1
-  %l2 = load <16 x i32>, <16 x i32>* %s2, align 4
+  %l1 = load <16 x i32>, ptr %src, align 4
+  %s2 = getelementptr <16 x i32>, ptr %src, i32 1
+  %l2 = load <16 x i32>, ptr %s2, align 4
   %s = shufflevector <16 x i32> %l1, <16 x i32> %l2, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
-  store <32 x i32> %s, <32 x i32> *%dst, align 4
+  store <32 x i32> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst2_v4i32_align1(<4 x i32> *%src, <8 x i32> *%dst) {
+define void @vst2_v4i32_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v4i32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
@@ -114,18 +110,17 @@ define void @vst2_v4i32_align1(<4 x i32> *%src, <8 x i32> *%dst) {
 ; CHECK-NEXT:    vstrb.8 q3, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x i32>, <4 x i32>* %src, i32 0
-  %l1 = load <4 x i32>, <4 x i32>* %s1, align 4
-  %s2 = getelementptr <4 x i32>, <4 x i32>* %src, i32 1
-  %l2 = load <4 x i32>, <4 x i32>* %s2, align 4
+  %l1 = load <4 x i32>, ptr %src, align 4
+  %s2 = getelementptr <4 x i32>, ptr %src, i32 1
+  %l2 = load <4 x i32>, ptr %s2, align 4
   %s = shufflevector <4 x i32> %l1, <4 x i32> %l2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x i32> %s, <8 x i32> *%dst, align 1
+  store <8 x i32> %s, ptr %dst, align 1
   ret void
 }
 
 ; i16
 
-define void @vst2_v2i16(<2 x i16> *%src, <4 x i16> *%dst) {
+define void @vst2_v2i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v2i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrh r2, [r0, #2]
@@ -137,16 +132,15 @@ define void @vst2_v2i16(<2 x i16> *%src, <4 x i16> *%dst) {
 ; CHECK-NEXT:    vstrh.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x i16>, <2 x i16>* %src, i32 0
-  %l1 = load <2 x i16>, <2 x i16>* %s1, align 4
-  %s2 = getelementptr <2 x i16>, <2 x i16>* %src, i32 1
-  %l2 = load <2 x i16>, <2 x i16>* %s2, align 4
+  %l1 = load <2 x i16>, ptr %src, align 4
+  %s2 = getelementptr <2 x i16>, ptr %src, i32 1
+  %l2 = load <2 x i16>, ptr %s2, align 4
   %s = shufflevector <2 x i16> %l1, <2 x i16> %l2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-  store <4 x i16> %s, <4 x i16> *%dst, align 2
+  store <4 x i16> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst2_v4i16(<4 x i16> *%src, <8 x i16> *%dst) {
+define void @vst2_v4i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r0, #8]
@@ -155,16 +149,15 @@ define void @vst2_v4i16(<4 x i16> *%src, <8 x i16> *%dst) {
 ; CHECK-NEXT:    vstrh.16 q1, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x i16>, <4 x i16>* %src, i32 0
-  %l1 = load <4 x i16>, <4 x i16>* %s1, align 4
-  %s2 = getelementptr <4 x i16>, <4 x i16>* %src, i32 1
-  %l2 = load <4 x i16>, <4 x i16>* %s2, align 4
+  %l1 = load <4 x i16>, ptr %src, align 4
+  %s2 = getelementptr <4 x i16>, ptr %src, i32 1
+  %l2 = load <4 x i16>, ptr %s2, align 4
   %s = shufflevector <4 x i16> %l1, <4 x i16> %l2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x i16> %s, <8 x i16> *%dst, align 2
+  store <8 x i16> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst2_v8i16(<8 x i16> *%src, <16 x i16> *%dst) {
+define void @vst2_v8i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
@@ -173,16 +166,15 @@ define void @vst2_v8i16(<8 x i16> *%src, <16 x i16> *%dst) {
 ; CHECK-NEXT:    vst21.16 {q0, q1}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x i16>, <8 x i16>* %src, i32 0
-  %l1 = load <8 x i16>, <8 x i16>* %s1, align 4
-  %s2 = getelementptr <8 x i16>, <8 x i16>* %src, i32 1
-  %l2 = load <8 x i16>, <8 x i16>* %s2, align 4
+  %l1 = load <8 x i16>, ptr %src, align 4
+  %s2 = getelementptr <8 x i16>, ptr %src, i32 1
+  %l2 = load <8 x i16>, ptr %s2, align 4
   %s = shufflevector <8 x i16> %l1, <8 x i16> %l2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-  store <16 x i16> %s, <16 x i16> *%dst, align 2
+  store <16 x i16> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst2_v16i16(<16 x i16> *%src, <32 x i16> *%dst) {
+define void @vst2_v16i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
@@ -195,16 +187,15 @@ define void @vst2_v16i16(<16 x i16> *%src, <32 x i16> *%dst) {
 ; CHECK-NEXT:    vst21.16 {q2, q3}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x i16>, <16 x i16>* %src, i32 0
-  %l1 = load <16 x i16>, <16 x i16>* %s1, align 4
-  %s2 = getelementptr <16 x i16>, <16 x i16>* %src, i32 1
-  %l2 = load <16 x i16>, <16 x i16>* %s2, align 4
+  %l1 = load <16 x i16>, ptr %src, align 4
+  %s2 = getelementptr <16 x i16>, ptr %src, i32 1
+  %l2 = load <16 x i16>, ptr %s2, align 4
   %s = shufflevector <16 x i16> %l1, <16 x i16> %l2, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
-  store <32 x i16> %s, <32 x i16> *%dst, align 2
+  store <32 x i16> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst2_v8i16_align1(<8 x i16> *%src, <16 x i16> *%dst) {
+define void @vst2_v8i16_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v8i16_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q2, [r0]
@@ -234,18 +225,17 @@ define void @vst2_v8i16_align1(<8 x i16> *%src, <16 x i16> *%dst) {
 ; CHECK-NEXT:    vstrb.8 q3, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x i16>, <8 x i16>* %src, i32 0
-  %l1 = load <8 x i16>, <8 x i16>* %s1, align 4
-  %s2 = getelementptr <8 x i16>, <8 x i16>* %src, i32 1
-  %l2 = load <8 x i16>, <8 x i16>* %s2, align 4
+  %l1 = load <8 x i16>, ptr %src, align 4
+  %s2 = getelementptr <8 x i16>, ptr %src, i32 1
+  %l2 = load <8 x i16>, ptr %s2, align 4
   %s = shufflevector <8 x i16> %l1, <8 x i16> %l2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-  store <16 x i16> %s, <16 x i16> *%dst, align 1
+  store <16 x i16> %s, ptr %dst, align 1
   ret void
 }
 
 ; i8
 
-define void @vst2_v2i8(<2 x i8> *%src, <4 x i8> *%dst) {
+define void @vst2_v2i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v2i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrb r2, [r0]
@@ -257,16 +247,15 @@ define void @vst2_v2i8(<2 x i8> *%src, <4 x i8> *%dst) {
 ; CHECK-NEXT:    vstrb.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x i8>, <2 x i8>* %src, i32 0
-  %l1 = load <2 x i8>, <2 x i8>* %s1, align 4
-  %s2 = getelementptr <2 x i8>, <2 x i8>* %src, i32 1
-  %l2 = load <2 x i8>, <2 x i8>* %s2, align 4
+  %l1 = load <2 x i8>, ptr %src, align 4
+  %s2 = getelementptr <2 x i8>, ptr %src, i32 1
+  %l2 = load <2 x i8>, ptr %s2, align 4
   %s = shufflevector <2 x i8> %l1, <2 x i8> %l2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-  store <4 x i8> %s, <4 x i8> *%dst, align 1
+  store <4 x i8> %s, ptr %dst, align 1
   ret void
 }
 
-define void @vst2_v4i8(<4 x i8> *%src, <8 x i8> *%dst) {
+define void @vst2_v4i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v4i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r0, #4]
@@ -275,16 +264,15 @@ define void @vst2_v4i8(<4 x i8> *%src, <8 x i8> *%dst) {
 ; CHECK-NEXT:    vstrb.16 q1, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x i8>, <4 x i8>* %src, i32 0
-  %l1 = load <4 x i8>, <4 x i8>* %s1, align 4
-  %s2 = getelementptr <4 x i8>, <4 x i8>* %src, i32 1
-  %l2 = load <4 x i8>, <4 x i8>* %s2, align 4
+  %l1 = load <4 x i8>, ptr %src, align 4
+  %s2 = getelementptr <4 x i8>, ptr %src, i32 1
+  %l2 = load <4 x i8>, ptr %s2, align 4
   %s = shufflevector <4 x i8> %l1, <4 x i8> %l2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x i8> %s, <8 x i8> *%dst, align 1
+  store <8 x i8> %s, ptr %dst, align 1
   ret void
 }
 
-define void @vst2_v8i8(<8 x i8> *%src, <16 x i8> *%dst) {
+define void @vst2_v8i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r0, #8]
@@ -293,16 +281,15 @@ define void @vst2_v8i8(<8 x i8> *%src, <16 x i8> *%dst) {
 ; CHECK-NEXT:    vstrb.8 q1, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x i8>, <8 x i8>* %src, i32 0
-  %l1 = load <8 x i8>, <8 x i8>* %s1, align 4
-  %s2 = getelementptr <8 x i8>, <8 x i8>* %src, i32 1
-  %l2 = load <8 x i8>, <8 x i8>* %s2, align 4
+  %l1 = load <8 x i8>, ptr %src, align 4
+  %s2 = getelementptr <8 x i8>, ptr %src, i32 1
+  %l2 = load <8 x i8>, ptr %s2, align 4
   %s = shufflevector <8 x i8> %l1, <8 x i8> %l2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-  store <16 x i8> %s, <16 x i8> *%dst, align 1
+  store <16 x i8> %s, ptr %dst, align 1
   ret void
 }
 
-define void @vst2_v16i8(<16 x i8> *%src, <32 x i8> *%dst) {
+define void @vst2_v16i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
@@ -311,18 +298,17 @@ define void @vst2_v16i8(<16 x i8> *%src, <32 x i8> *%dst) {
 ; CHECK-NEXT:    vst21.8 {q0, q1}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x i8>, <16 x i8>* %src, i32 0
-  %l1 = load <16 x i8>, <16 x i8>* %s1, align 4
-  %s2 = getelementptr <16 x i8>, <16 x i8>* %src, i32 1
-  %l2 = load <16 x i8>, <16 x i8>* %s2, align 4
+  %l1 = load <16 x i8>, ptr %src, align 4
+  %s2 = getelementptr <16 x i8>, ptr %src, i32 1
+  %l2 = load <16 x i8>, ptr %s2, align 4
   %s = shufflevector <16 x i8> %l1, <16 x i8> %l2, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
-  store <32 x i8> %s, <32 x i8> *%dst, align 1
+  store <32 x i8> %s, ptr %dst, align 1
   ret void
 }
 
 ; i64
 
-define void @vst2_v2i64(<2 x i64> *%src, <4 x i64> *%dst) {
+define void @vst2_v2i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
@@ -334,16 +320,15 @@ define void @vst2_v2i64(<2 x i64> *%src, <4 x i64> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q1, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x i64>, <2 x i64>* %src, i32 0
-  %l1 = load <2 x i64>, <2 x i64>* %s1, align 4
-  %s2 = getelementptr <2 x i64>, <2 x i64>* %src, i32 1
-  %l2 = load <2 x i64>, <2 x i64>* %s2, align 4
+  %l1 = load <2 x i64>, ptr %src, align 4
+  %s2 = getelementptr <2 x i64>, ptr %src, i32 1
+  %l2 = load <2 x i64>, ptr %s2, align 4
   %s = shufflevector <2 x i64> %l1, <2 x i64> %l2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-  store <4 x i64> %s, <4 x i64> *%dst, align 8
+  store <4 x i64> %s, ptr %dst, align 8
   ret void
 }
 
-define void @vst2_v4i64(<4 x i64> *%src, <8 x i64> *%dst) {
+define void @vst2_v4i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v4i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -365,18 +350,17 @@ define void @vst2_v4i64(<4 x i64> *%src, <8 x i64> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x i64>, <4 x i64>* %src, i32 0
-  %l1 = load <4 x i64>, <4 x i64>* %s1, align 4
-  %s2 = getelementptr <4 x i64>, <4 x i64>* %src, i32 1
-  %l2 = load <4 x i64>, <4 x i64>* %s2, align 4
+  %l1 = load <4 x i64>, ptr %src, align 4
+  %s2 = getelementptr <4 x i64>, ptr %src, i32 1
+  %l2 = load <4 x i64>, ptr %s2, align 4
   %s = shufflevector <4 x i64> %l1, <4 x i64> %l2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x i64> %s, <8 x i64> *%dst, align 8
+  store <8 x i64> %s, ptr %dst, align 8
   ret void
 }
 
 ; f32
 
-define void @vst2_v2f32(<2 x float> *%src, <4 x float> *%dst) {
+define void @vst2_v2f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v2f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldr s0, [r0]
@@ -386,16 +370,15 @@ define void @vst2_v2f32(<2 x float> *%src, <4 x float> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x float>, <2 x float>* %src, i32 0
-  %l1 = load <2 x float>, <2 x float>* %s1, align 4
-  %s2 = getelementptr <2 x float>, <2 x float>* %src, i32 1
-  %l2 = load <2 x float>, <2 x float>* %s2, align 4
+  %l1 = load <2 x float>, ptr %src, align 4
+  %s2 = getelementptr <2 x float>, ptr %src, i32 1
+  %l2 = load <2 x float>, ptr %s2, align 4
   %s = shufflevector <2 x float> %l1, <2 x float> %l2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-  store <4 x float> %s, <4 x float> *%dst, align 4
+  store <4 x float> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst2_v4f32(<4 x float> *%src, <8 x float> *%dst) {
+define void @vst2_v4f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
@@ -404,16 +387,15 @@ define void @vst2_v4f32(<4 x float> *%src, <8 x float> *%dst) {
 ; CHECK-NEXT:    vst21.32 {q0, q1}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x float>, <4 x float>* %src, i32 0
-  %l1 = load <4 x float>, <4 x float>* %s1, align 4
-  %s2 = getelementptr <4 x float>, <4 x float>* %src, i32 1
-  %l2 = load <4 x float>, <4 x float>* %s2, align 4
+  %l1 = load <4 x float>, ptr %src, align 4
+  %s2 = getelementptr <4 x float>, ptr %src, i32 1
+  %l2 = load <4 x float>, ptr %s2, align 4
   %s = shufflevector <4 x float> %l1, <4 x float> %l2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x float> %s, <8 x float> *%dst, align 4
+  store <8 x float> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst2_v8f32(<8 x float> *%src, <16 x float> *%dst) {
+define void @vst2_v8f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v8f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
@@ -426,16 +408,15 @@ define void @vst2_v8f32(<8 x float> *%src, <16 x float> *%dst) {
 ; CHECK-NEXT:    vst21.32 {q2, q3}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x float>, <8 x float>* %src, i32 0
-  %l1 = load <8 x float>, <8 x float>* %s1, align 4
-  %s2 = getelementptr <8 x float>, <8 x float>* %src, i32 1
-  %l2 = load <8 x float>, <8 x float>* %s2, align 4
+  %l1 = load <8 x float>, ptr %src, align 4
+  %s2 = getelementptr <8 x float>, ptr %src, i32 1
+  %l2 = load <8 x float>, ptr %s2, align 4
   %s = shufflevector <8 x float> %l1, <8 x float> %l2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-  store <16 x float> %s, <16 x float> *%dst, align 4
+  store <16 x float> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst2_v16f32(<16 x float> *%src, <32 x float> *%dst) {
+define void @vst2_v16f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v16f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -461,16 +442,15 @@ define void @vst2_v16f32(<16 x float> *%src, <32 x float> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x float>, <16 x float>* %src, i32 0
-  %l1 = load <16 x float>, <16 x float>* %s1, align 4
-  %s2 = getelementptr <16 x float>, <16 x float>* %src, i32 1
-  %l2 = load <16 x float>, <16 x float>* %s2, align 4
+  %l1 = load <16 x float>, ptr %src, align 4
+  %s2 = getelementptr <16 x float>, ptr %src, i32 1
+  %l2 = load <16 x float>, ptr %s2, align 4
   %s = shufflevector <16 x float> %l1, <16 x float> %l2, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
-  store <32 x float> %s, <32 x float> *%dst, align 4
+  store <32 x float> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst2_v4f32_align1(<4 x float> *%src, <8 x float> *%dst) {
+define void @vst2_v4f32_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v4f32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
@@ -487,18 +467,17 @@ define void @vst2_v4f32_align1(<4 x float> *%src, <8 x float> *%dst) {
 ; CHECK-NEXT:    vstrb.8 q3, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x float>, <4 x float>* %src, i32 0
-  %l1 = load <4 x float>, <4 x float>* %s1, align 4
-  %s2 = getelementptr <4 x float>, <4 x float>* %src, i32 1
-  %l2 = load <4 x float>, <4 x float>* %s2, align 4
+  %l1 = load <4 x float>, ptr %src, align 4
+  %s2 = getelementptr <4 x float>, ptr %src, i32 1
+  %l2 = load <4 x float>, ptr %s2, align 4
   %s = shufflevector <4 x float> %l1, <4 x float> %l2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x float> %s, <8 x float> *%dst, align 1
+  store <8 x float> %s, ptr %dst, align 1
   ret void
 }
 
 ; f16
 
-define void @vst2_v2f16(<2 x half> *%src, <4 x half> *%dst) {
+define void @vst2_v2f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v2f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrd r2, r0, [r0]
@@ -513,16 +492,15 @@ define void @vst2_v2f16(<2 x half> *%src, <4 x half> *%dst) {
 ; CHECK-NEXT:    str r0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x half>, <2 x half>* %src, i32 0
-  %l1 = load <2 x half>, <2 x half>* %s1, align 4
-  %s2 = getelementptr <2 x half>, <2 x half>* %src, i32 1
-  %l2 = load <2 x half>, <2 x half>* %s2, align 4
+  %l1 = load <2 x half>, ptr %src, align 4
+  %s2 = getelementptr <2 x half>, ptr %src, i32 1
+  %l2 = load <2 x half>, ptr %s2, align 4
   %s = shufflevector <2 x half> %l1, <2 x half> %l2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-  store <4 x half> %s, <4 x half> *%dst, align 2
+  store <4 x half> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst2_v4f16(<4 x half> *%src, <8 x half> *%dst) {
+define void @vst2_v4f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v4f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrd r2, r12, [r0]
@@ -546,16 +524,15 @@ define void @vst2_v4f16(<4 x half> *%src, <8 x half> *%dst) {
 ; CHECK-NEXT:    vstrh.16 q2, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x half>, <4 x half>* %src, i32 0
-  %l1 = load <4 x half>, <4 x half>* %s1, align 4
-  %s2 = getelementptr <4 x half>, <4 x half>* %src, i32 1
-  %l2 = load <4 x half>, <4 x half>* %s2, align 4
+  %l1 = load <4 x half>, ptr %src, align 4
+  %s2 = getelementptr <4 x half>, ptr %src, i32 1
+  %l2 = load <4 x half>, ptr %s2, align 4
   %s = shufflevector <4 x half> %l1, <4 x half> %l2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x half> %s, <8 x half> *%dst, align 2
+  store <8 x half> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst2_v8f16(<8 x half> *%src, <16 x half> *%dst) {
+define void @vst2_v8f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v8f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
@@ -564,16 +541,15 @@ define void @vst2_v8f16(<8 x half> *%src, <16 x half> *%dst) {
 ; CHECK-NEXT:    vst21.16 {q0, q1}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x half>, <8 x half>* %src, i32 0
-  %l1 = load <8 x half>, <8 x half>* %s1, align 4
-  %s2 = getelementptr <8 x half>, <8 x half>* %src, i32 1
-  %l2 = load <8 x half>, <8 x half>* %s2, align 4
+  %l1 = load <8 x half>, ptr %src, align 4
+  %s2 = getelementptr <8 x half>, ptr %src, i32 1
+  %l2 = load <8 x half>, ptr %s2, align 4
   %s = shufflevector <8 x half> %l1, <8 x half> %l2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-  store <16 x half> %s, <16 x half> *%dst, align 2
+  store <16 x half> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst2_v16f16(<16 x half> *%src, <32 x half> *%dst) {
+define void @vst2_v16f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v16f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
@@ -586,16 +562,15 @@ define void @vst2_v16f16(<16 x half> *%src, <32 x half> *%dst) {
 ; CHECK-NEXT:    vst21.16 {q0, q1}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x half>, <16 x half>* %src, i32 0
-  %l1 = load <16 x half>, <16 x half>* %s1, align 4
-  %s2 = getelementptr <16 x half>, <16 x half>* %src, i32 1
-  %l2 = load <16 x half>, <16 x half>* %s2, align 4
+  %l1 = load <16 x half>, ptr %src, align 4
+  %s2 = getelementptr <16 x half>, ptr %src, i32 1
+  %l2 = load <16 x half>, ptr %s2, align 4
   %s = shufflevector <16 x half> %l1, <16 x half> %l2, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
-  store <32 x half> %s, <32 x half> *%dst, align 2
+  store <32 x half> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst2_v8f16_align1(<8 x half> *%src, <16 x half> *%dst) {
+define void @vst2_v8f16_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v8f16_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
@@ -626,18 +601,17 @@ define void @vst2_v8f16_align1(<8 x half> *%src, <16 x half> *%dst) {
 ; CHECK-NEXT:    vstrb.8 q3, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x half>, <8 x half>* %src, i32 0
-  %l1 = load <8 x half>, <8 x half>* %s1, align 4
-  %s2 = getelementptr <8 x half>, <8 x half>* %src, i32 1
-  %l2 = load <8 x half>, <8 x half>* %s2, align 4
+  %l1 = load <8 x half>, ptr %src, align 4
+  %s2 = getelementptr <8 x half>, ptr %src, i32 1
+  %l2 = load <8 x half>, ptr %s2, align 4
   %s = shufflevector <8 x half> %l1, <8 x half> %l2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
-  store <16 x half> %s, <16 x half> *%dst, align 1
+  store <16 x half> %s, ptr %dst, align 1
   ret void
 }
 
 ; f64
 
-define void @vst2_v2f64(<2 x double> *%src, <4 x double> *%dst) {
+define void @vst2_v2f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v2f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
@@ -649,16 +623,15 @@ define void @vst2_v2f64(<2 x double> *%src, <4 x double> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q1, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x double>, <2 x double>* %src, i32 0
-  %l1 = load <2 x double>, <2 x double>* %s1, align 4
-  %s2 = getelementptr <2 x double>, <2 x double>* %src, i32 1
-  %l2 = load <2 x double>, <2 x double>* %s2, align 4
+  %l1 = load <2 x double>, ptr %src, align 4
+  %s2 = getelementptr <2 x double>, ptr %src, i32 1
+  %l2 = load <2 x double>, ptr %s2, align 4
   %s = shufflevector <2 x double> %l1, <2 x double> %l2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-  store <4 x double> %s, <4 x double> *%dst, align 8
+  store <4 x double> %s, ptr %dst, align 8
   ret void
 }
 
-define void @vst2_v4f64(<4 x double> *%src, <8 x double> *%dst) {
+define void @vst2_v4f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst2_v4f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -680,11 +653,10 @@ define void @vst2_v4f64(<4 x double> *%src, <8 x double> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x double>, <4 x double>* %src, i32 0
-  %l1 = load <4 x double>, <4 x double>* %s1, align 4
-  %s2 = getelementptr <4 x double>, <4 x double>* %src, i32 1
-  %l2 = load <4 x double>, <4 x double>* %s2, align 4
+  %l1 = load <4 x double>, ptr %src, align 4
+  %s2 = getelementptr <4 x double>, ptr %src, i32 1
+  %l2 = load <4 x double>, ptr %s2, align 4
   %s = shufflevector <4 x double> %l1, <4 x double> %l2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x double> %s, <8 x double> *%dst, align 8
+  store <8 x double> %s, ptr %dst, align 8
   ret void
 }

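The vst3 tests that follow extend the same idea to a three-way interleave. Since shufflevector takes only two operands, the IR first concatenates two of the inputs, pads the third with undef, and then applies the three-way mask. Annotated lane movement for the v2i32 case, mirroring the IR below:

  ; %a = a0 a1, %b = b0 b1, %c = c0 c1
  %t1 = shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>                 ; a0 a1 b0 b1
  %t2 = shufflevector <2 x i32> %c, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>      ; c0 c1 _ _
  %s  = shufflevector <4 x i32> %t1, <4 x i32> %t2, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5> ; a0 b0 c0 a1 b1 c1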
diff --git a/llvm/test/CodeGen/Thumb2/mve-vst3.ll b/llvm/test/CodeGen/Thumb2/mve-vst3.ll
index 2f59c59d1f199..55e5d1aa5e342 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vst3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vst3.ll
@@ -3,7 +3,7 @@
 
 ; i32
 
-define void @vst3_v2i32(<2 x i32> *%src, <6 x i32> *%dst) {
+define void @vst3_v2i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v2i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -22,20 +22,19 @@ define void @vst3_v2i32(<2 x i32> *%src, <6 x i32> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q2, [r1]
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
-  %s1 = getelementptr <2 x i32>, <2 x i32>* %src, i32 0
-  %l1 = load <2 x i32>, <2 x i32>* %s1, align 4
-  %s2 = getelementptr <2 x i32>, <2 x i32>* %src, i32 1
-  %l2 = load <2 x i32>, <2 x i32>* %s2, align 4
-  %s3 = getelementptr <2 x i32>, <2 x i32>* %src, i32 2
-  %l3 = load <2 x i32>, <2 x i32>* %s3, align 4
+  %l1 = load <2 x i32>, ptr %src, align 4
+  %s2 = getelementptr <2 x i32>, ptr %src, i32 1
+  %l2 = load <2 x i32>, ptr %s2, align 4
+  %s3 = getelementptr <2 x i32>, ptr %src, i32 2
+  %l3 = load <2 x i32>, ptr %s3, align 4
   %t1 = shufflevector <2 x i32> %l1, <2 x i32> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x i32> %l3, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %s = shufflevector <4 x i32> %t1, <4 x i32> %t2, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
-  store <6 x i32> %s, <6 x i32> *%dst
+  store <6 x i32> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v4i32(<4 x i32> *%src, <12 x i32> *%dst) {
+define void @vst3_v4i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -61,20 +60,19 @@ define void @vst3_v4i32(<4 x i32> *%src, <12 x i32> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x i32>, <4 x i32>* %src, i32 0
-  %l1 = load <4 x i32>, <4 x i32>* %s1, align 4
-  %s2 = getelementptr <4 x i32>, <4 x i32>* %src, i32 1
-  %l2 = load <4 x i32>, <4 x i32>* %s2, align 4
-  %s3 = getelementptr <4 x i32>, <4 x i32>* %src, i32 2
-  %l3 = load <4 x i32>, <4 x i32>* %s3, align 4
+  %l1 = load <4 x i32>, ptr %src, align 4
+  %s2 = getelementptr <4 x i32>, ptr %src, i32 1
+  %l2 = load <4 x i32>, ptr %s2, align 4
+  %s3 = getelementptr <4 x i32>, ptr %src, i32 2
+  %l3 = load <4 x i32>, ptr %s3, align 4
   %t1 = shufflevector <4 x i32> %l1, <4 x i32> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x i32> %l3, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <8 x i32> %t1, <8 x i32> %t2, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
-  store <12 x i32> %s, <12 x i32> *%dst
+  store <12 x i32> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v8i32(<8 x i32> *%src, <24 x i32> *%dst) {
+define void @vst3_v8i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v8i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r7, lr}
@@ -120,20 +118,19 @@ define void @vst3_v8i32(<8 x i32> *%src, <24 x i32> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
-  %s1 = getelementptr <8 x i32>, <8 x i32>* %src, i32 0
-  %l1 = load <8 x i32>, <8 x i32>* %s1, align 4
-  %s2 = getelementptr <8 x i32>, <8 x i32>* %src, i32 1
-  %l2 = load <8 x i32>, <8 x i32>* %s2, align 4
-  %s3 = getelementptr <8 x i32>, <8 x i32>* %src, i32 2
-  %l3 = load <8 x i32>, <8 x i32>* %s3, align 4
+  %l1 = load <8 x i32>, ptr %src, align 4
+  %s2 = getelementptr <8 x i32>, ptr %src, i32 1
+  %l2 = load <8 x i32>, ptr %s2, align 4
+  %s3 = getelementptr <8 x i32>, ptr %src, i32 2
+  %l3 = load <8 x i32>, ptr %s3, align 4
   %t1 = shufflevector <8 x i32> %l1, <8 x i32> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x i32> %l3, <8 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <16 x i32> %t1, <16 x i32> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
-  store <24 x i32> %s, <24 x i32> *%dst
+  store <24 x i32> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v16i32(<16 x i32> *%src, <48 x i32> *%dst) {
+define void @vst3_v16i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v16i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -244,22 +241,21 @@ define void @vst3_v16i32(<16 x i32> *%src, <48 x i32> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
-  %s1 = getelementptr <16 x i32>, <16 x i32>* %src, i32 0
-  %l1 = load <16 x i32>, <16 x i32>* %s1, align 4
-  %s2 = getelementptr <16 x i32>, <16 x i32>* %src, i32 1
-  %l2 = load <16 x i32>, <16 x i32>* %s2, align 4
-  %s3 = getelementptr <16 x i32>, <16 x i32>* %src, i32 2
-  %l3 = load <16 x i32>, <16 x i32>* %s3, align 4
+  %l1 = load <16 x i32>, ptr %src, align 4
+  %s2 = getelementptr <16 x i32>, ptr %src, i32 1
+  %l2 = load <16 x i32>, ptr %s2, align 4
+  %s3 = getelementptr <16 x i32>, ptr %src, i32 2
+  %l3 = load <16 x i32>, ptr %s3, align 4
   %t1 = shufflevector <16 x i32> %l1, <16 x i32> %l2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %t2 = shufflevector <16 x i32> %l3, <16 x i32> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <32 x i32> %t1, <32 x i32> %t2, <48 x i32> <i32 0, i32 16, i32 32, i32 1, i32 17, i32 33, i32 2, i32 18, i32 34, i32 3, i32 19, i32 35, i32 4, i32 20, i32 36, i32 5, i32 21, i32 37, i32 6, i32 22, i32 38, i32 7, i32 23, i32 39, i32 8, i32 24, i32 40, i32 9, i32 25, i32 41, i32 10, i32 26, i32 42, i32 11, i32 27, i32 43, i32 12, i32 28, i32 44, i32 13, i32 29, i32 45, i32 14, i32 30, i32 46, i32 15, i32 31, i32 47>
-  store <48 x i32> %s, <48 x i32> *%dst
+  store <48 x i32> %s, ptr %dst
   ret void
 }
 
 ; i16
 
-define void @vst3_v2i16(<2 x i16> *%src, <6 x i16> *%dst) {
+define void @vst3_v2i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v2i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -283,20 +279,19 @@ define void @vst3_v2i16(<2 x i16> *%src, <6 x i16> *%dst) {
 ; CHECK-NEXT:    str r0, [r1, #8]
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
-  %s1 = getelementptr <2 x i16>, <2 x i16>* %src, i32 0
-  %l1 = load <2 x i16>, <2 x i16>* %s1, align 4
-  %s2 = getelementptr <2 x i16>, <2 x i16>* %src, i32 1
-  %l2 = load <2 x i16>, <2 x i16>* %s2, align 4
-  %s3 = getelementptr <2 x i16>, <2 x i16>* %src, i32 2
-  %l3 = load <2 x i16>, <2 x i16>* %s3, align 4
+  %l1 = load <2 x i16>, ptr %src, align 4
+  %s2 = getelementptr <2 x i16>, ptr %src, i32 1
+  %l2 = load <2 x i16>, ptr %s2, align 4
+  %s3 = getelementptr <2 x i16>, ptr %src, i32 2
+  %l3 = load <2 x i16>, ptr %s3, align 4
   %t1 = shufflevector <2 x i16> %l1, <2 x i16> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x i16> %l3, <2 x i16> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %s = shufflevector <4 x i16> %t1, <4 x i16> %t2, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
-  store <6 x i16> %s, <6 x i16> *%dst
+  store <6 x i16> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v4i16(<4 x i16> *%src, <12 x i16> *%dst) {
+define void @vst3_v4i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -325,20 +320,19 @@ define void @vst3_v4i16(<4 x i16> *%src, <12 x i16> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
 entry:
-  %s1 = getelementptr <4 x i16>, <4 x i16>* %src, i32 0
-  %l1 = load <4 x i16>, <4 x i16>* %s1, align 4
-  %s2 = getelementptr <4 x i16>, <4 x i16>* %src, i32 1
-  %l2 = load <4 x i16>, <4 x i16>* %s2, align 4
-  %s3 = getelementptr <4 x i16>, <4 x i16>* %src, i32 2
-  %l3 = load <4 x i16>, <4 x i16>* %s3, align 4
+  %l1 = load <4 x i16>, ptr %src, align 4
+  %s2 = getelementptr <4 x i16>, ptr %src, i32 1
+  %l2 = load <4 x i16>, ptr %s2, align 4
+  %s3 = getelementptr <4 x i16>, ptr %src, i32 2
+  %l3 = load <4 x i16>, ptr %s3, align 4
   %t1 = shufflevector <4 x i16> %l1, <4 x i16> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x i16> %l3, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <8 x i16> %t1, <8 x i16> %t2, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
-  store <12 x i16> %s, <12 x i16> *%dst
+  store <12 x i16> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v8i16(<8 x i16> *%src, <24 x i16> *%dst) {
+define void @vst3_v8i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -392,20 +386,19 @@ define void @vst3_v8i16(<8 x i16> *%src, <24 x i16> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x i16>, <8 x i16>* %src, i32 0
-  %l1 = load <8 x i16>, <8 x i16>* %s1, align 4
-  %s2 = getelementptr <8 x i16>, <8 x i16>* %src, i32 1
-  %l2 = load <8 x i16>, <8 x i16>* %s2, align 4
-  %s3 = getelementptr <8 x i16>, <8 x i16>* %src, i32 2
-  %l3 = load <8 x i16>, <8 x i16>* %s3, align 4
+  %l1 = load <8 x i16>, ptr %src, align 4
+  %s2 = getelementptr <8 x i16>, ptr %src, i32 1
+  %l2 = load <8 x i16>, ptr %s2, align 4
+  %s3 = getelementptr <8 x i16>, ptr %src, i32 2
+  %l3 = load <8 x i16>, ptr %s3, align 4
   %t1 = shufflevector <8 x i16> %l1, <8 x i16> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x i16> %l3, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <16 x i16> %t1, <16 x i16> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
-  store <24 x i16> %s, <24 x i16> *%dst
+  store <24 x i16> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v16i16(<16 x i16> *%src, <48 x i16> *%dst) {
+define void @vst3_v16i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -518,22 +511,21 @@ define void @vst3_v16i16(<16 x i16> *%src, <48 x i16> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x i16>, <16 x i16>* %src, i32 0
-  %l1 = load <16 x i16>, <16 x i16>* %s1, align 4
-  %s2 = getelementptr <16 x i16>, <16 x i16>* %src, i32 1
-  %l2 = load <16 x i16>, <16 x i16>* %s2, align 4
-  %s3 = getelementptr <16 x i16>, <16 x i16>* %src, i32 2
-  %l3 = load <16 x i16>, <16 x i16>* %s3, align 4
+  %l1 = load <16 x i16>, ptr %src, align 4
+  %s2 = getelementptr <16 x i16>, ptr %src, i32 1
+  %l2 = load <16 x i16>, ptr %s2, align 4
+  %s3 = getelementptr <16 x i16>, ptr %src, i32 2
+  %l3 = load <16 x i16>, ptr %s3, align 4
   %t1 = shufflevector <16 x i16> %l1, <16 x i16> %l2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %t2 = shufflevector <16 x i16> %l3, <16 x i16> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <32 x i16> %t1, <32 x i16> %t2, <48 x i32> <i32 0, i32 16, i32 32, i32 1, i32 17, i32 33, i32 2, i32 18, i32 34, i32 3, i32 19, i32 35, i32 4, i32 20, i32 36, i32 5, i32 21, i32 37, i32 6, i32 22, i32 38, i32 7, i32 23, i32 39, i32 8, i32 24, i32 40, i32 9, i32 25, i32 41, i32 10, i32 26, i32 42, i32 11, i32 27, i32 43, i32 12, i32 28, i32 44, i32 13, i32 29, i32 45, i32 14, i32 30, i32 46, i32 15, i32 31, i32 47>
-  store <48 x i16> %s, <48 x i16> *%dst
+  store <48 x i16> %s, ptr %dst
   ret void
 }
 
 ; i8
 
-define void @vst3_v2i8(<2 x i8> *%src, <6 x i8> *%dst) {
+define void @vst3_v2i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v2i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -566,20 +558,19 @@ define void @vst3_v2i8(<2 x i8> *%src, <6 x i8> *%dst) {
 ; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
 entry:
-  %s1 = getelementptr <2 x i8>, <2 x i8>* %src, i32 0
-  %l1 = load <2 x i8>, <2 x i8>* %s1, align 4
-  %s2 = getelementptr <2 x i8>, <2 x i8>* %src, i32 1
-  %l2 = load <2 x i8>, <2 x i8>* %s2, align 4
-  %s3 = getelementptr <2 x i8>, <2 x i8>* %src, i32 2
-  %l3 = load <2 x i8>, <2 x i8>* %s3, align 4
+  %l1 = load <2 x i8>, ptr %src, align 4
+  %s2 = getelementptr <2 x i8>, ptr %src, i32 1
+  %l2 = load <2 x i8>, ptr %s2, align 4
+  %s3 = getelementptr <2 x i8>, ptr %src, i32 2
+  %l3 = load <2 x i8>, ptr %s3, align 4
   %t1 = shufflevector <2 x i8> %l1, <2 x i8> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x i8> %l3, <2 x i8> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %s = shufflevector <4 x i8> %t1, <4 x i8> %t2, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
-  store <6 x i8> %s, <6 x i8> *%dst
+  store <6 x i8> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v4i8(<4 x i8> *%src, <12 x i8> *%dst) {
+define void @vst3_v4i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v4i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -610,20 +601,19 @@ define void @vst3_v4i8(<4 x i8> *%src, <12 x i8> *%dst) {
 ; CHECK-NEXT:    vstrb.16 q1, [r1]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %s1 = getelementptr <4 x i8>, <4 x i8>* %src, i32 0
-  %l1 = load <4 x i8>, <4 x i8>* %s1, align 4
-  %s2 = getelementptr <4 x i8>, <4 x i8>* %src, i32 1
-  %l2 = load <4 x i8>, <4 x i8>* %s2, align 4
-  %s3 = getelementptr <4 x i8>, <4 x i8>* %src, i32 2
-  %l3 = load <4 x i8>, <4 x i8>* %s3, align 4
+  %l1 = load <4 x i8>, ptr %src, align 4
+  %s2 = getelementptr <4 x i8>, ptr %src, i32 1
+  %l2 = load <4 x i8>, ptr %s2, align 4
+  %s3 = getelementptr <4 x i8>, ptr %src, i32 2
+  %l3 = load <4 x i8>, ptr %s3, align 4
   %t1 = shufflevector <4 x i8> %l1, <4 x i8> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x i8> %l3, <4 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <8 x i8> %t1, <8 x i8> %t2, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
-  store <12 x i8> %s, <12 x i8> *%dst
+  store <12 x i8> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v8i8(<8 x i8> *%src, <24 x i8> *%dst) {
+define void @vst3_v8i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -681,20 +671,19 @@ define void @vst3_v8i8(<8 x i8> *%src, <24 x i8> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x i8>, <8 x i8>* %src, i32 0
-  %l1 = load <8 x i8>, <8 x i8>* %s1, align 4
-  %s2 = getelementptr <8 x i8>, <8 x i8>* %src, i32 1
-  %l2 = load <8 x i8>, <8 x i8>* %s2, align 4
-  %s3 = getelementptr <8 x i8>, <8 x i8>* %src, i32 2
-  %l3 = load <8 x i8>, <8 x i8>* %s3, align 4
+  %l1 = load <8 x i8>, ptr %src, align 4
+  %s2 = getelementptr <8 x i8>, ptr %src, i32 1
+  %l2 = load <8 x i8>, ptr %s2, align 4
+  %s3 = getelementptr <8 x i8>, ptr %src, i32 2
+  %l3 = load <8 x i8>, ptr %s3, align 4
   %t1 = shufflevector <8 x i8> %l1, <8 x i8> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x i8> %l3, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <16 x i8> %t1, <16 x i8> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
-  store <24 x i8> %s, <24 x i8> *%dst
+  store <24 x i8> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v16i8(<16 x i8> *%src, <48 x i8> *%dst) {
+define void @vst3_v16i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
@@ -868,22 +857,21 @@ define void @vst3_v16i8(<16 x i8> *%src, <48 x i8> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x i8>, <16 x i8>* %src, i32 0
-  %l1 = load <16 x i8>, <16 x i8>* %s1, align 4
-  %s2 = getelementptr <16 x i8>, <16 x i8>* %src, i32 1
-  %l2 = load <16 x i8>, <16 x i8>* %s2, align 4
-  %s3 = getelementptr <16 x i8>, <16 x i8>* %src, i32 2
-  %l3 = load <16 x i8>, <16 x i8>* %s3, align 4
+  %l1 = load <16 x i8>, ptr %src, align 4
+  %s2 = getelementptr <16 x i8>, ptr %src, i32 1
+  %l2 = load <16 x i8>, ptr %s2, align 4
+  %s3 = getelementptr <16 x i8>, ptr %src, i32 2
+  %l3 = load <16 x i8>, ptr %s3, align 4
   %t1 = shufflevector <16 x i8> %l1, <16 x i8> %l2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %t2 = shufflevector <16 x i8> %l3, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <32 x i8> %t1, <32 x i8> %t2, <48 x i32> <i32 0, i32 16, i32 32, i32 1, i32 17, i32 33, i32 2, i32 18, i32 34, i32 3, i32 19, i32 35, i32 4, i32 20, i32 36, i32 5, i32 21, i32 37, i32 6, i32 22, i32 38, i32 7, i32 23, i32 39, i32 8, i32 24, i32 40, i32 9, i32 25, i32 41, i32 10, i32 26, i32 42, i32 11, i32 27, i32 43, i32 12, i32 28, i32 44, i32 13, i32 29, i32 45, i32 14, i32 30, i32 46, i32 15, i32 31, i32 47>
-  store <48 x i8> %s, <48 x i8> *%dst
+  store <48 x i8> %s, ptr %dst
   ret void
 }
 
 ; i64
 
-define void @vst3_v2i64(<2 x i64> *%src, <6 x i64> *%dst) {
+define void @vst3_v2i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -898,20 +886,19 @@ define void @vst3_v2i64(<2 x i64> *%src, <6 x i64> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q1, [r1, #32]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x i64>, <2 x i64>* %src, i32 0
-  %l1 = load <2 x i64>, <2 x i64>* %s1, align 4
-  %s2 = getelementptr <2 x i64>, <2 x i64>* %src, i32 1
-  %l2 = load <2 x i64>, <2 x i64>* %s2, align 4
-  %s3 = getelementptr <2 x i64>, <2 x i64>* %src, i32 2
-  %l3 = load <2 x i64>, <2 x i64>* %s3, align 4
+  %l1 = load <2 x i64>, ptr %src, align 4
+  %s2 = getelementptr <2 x i64>, ptr %src, i32 1
+  %l2 = load <2 x i64>, ptr %s2, align 4
+  %s3 = getelementptr <2 x i64>, ptr %src, i32 2
+  %l3 = load <2 x i64>, ptr %s3, align 4
   %t1 = shufflevector <2 x i64> %l1, <2 x i64> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x i64> %l3, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %s = shufflevector <4 x i64> %t1, <4 x i64> %t2, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
-  store <6 x i64> %s, <6 x i64> *%dst
+  store <6 x i64> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v4i64(<4 x i64> *%src, <12 x i64> *%dst) {
+define void @vst3_v4i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v4i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -941,22 +928,21 @@ define void @vst3_v4i64(<4 x i64> *%src, <12 x i64> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x i64>, <4 x i64>* %src, i32 0
-  %l1 = load <4 x i64>, <4 x i64>* %s1, align 4
-  %s2 = getelementptr <4 x i64>, <4 x i64>* %src, i32 1
-  %l2 = load <4 x i64>, <4 x i64>* %s2, align 4
-  %s3 = getelementptr <4 x i64>, <4 x i64>* %src, i32 2
-  %l3 = load <4 x i64>, <4 x i64>* %s3, align 4
+  %l1 = load <4 x i64>, ptr %src, align 4
+  %s2 = getelementptr <4 x i64>, ptr %src, i32 1
+  %l2 = load <4 x i64>, ptr %s2, align 4
+  %s3 = getelementptr <4 x i64>, ptr %src, i32 2
+  %l3 = load <4 x i64>, ptr %s3, align 4
   %t1 = shufflevector <4 x i64> %l1, <4 x i64> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x i64> %l3, <4 x i64> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <8 x i64> %t1, <8 x i64> %t2, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
-  store <12 x i64> %s, <12 x i64> *%dst
+  store <12 x i64> %s, ptr %dst
   ret void
 }
 
 ; f32
 
-define void @vst3_v2f32(<2 x float> *%src, <6 x float> *%dst) {
+define void @vst3_v2f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v2f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r2, [r0, #20]
@@ -969,20 +955,19 @@ define void @vst3_v2f32(<2 x float> *%src, <6 x float> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x float>, <2 x float>* %src, i32 0
-  %l1 = load <2 x float>, <2 x float>* %s1, align 4
-  %s2 = getelementptr <2 x float>, <2 x float>* %src, i32 1
-  %l2 = load <2 x float>, <2 x float>* %s2, align 4
-  %s3 = getelementptr <2 x float>, <2 x float>* %src, i32 2
-  %l3 = load <2 x float>, <2 x float>* %s3, align 4
+  %l1 = load <2 x float>, ptr %src, align 4
+  %s2 = getelementptr <2 x float>, ptr %src, i32 1
+  %l2 = load <2 x float>, ptr %s2, align 4
+  %s3 = getelementptr <2 x float>, ptr %src, i32 2
+  %l3 = load <2 x float>, ptr %s3, align 4
   %t1 = shufflevector <2 x float> %l1, <2 x float> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x float> %l3, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %s = shufflevector <4 x float> %t1, <4 x float> %t2, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
-  store <6 x float> %s, <6 x float> *%dst
+  store <6 x float> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v4f32(<4 x float> *%src, <12 x float> *%dst) {
+define void @vst3_v4f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -1007,20 +992,19 @@ define void @vst3_v4f32(<4 x float> *%src, <12 x float> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x float>, <4 x float>* %src, i32 0
-  %l1 = load <4 x float>, <4 x float>* %s1, align 4
-  %s2 = getelementptr <4 x float>, <4 x float>* %src, i32 1
-  %l2 = load <4 x float>, <4 x float>* %s2, align 4
-  %s3 = getelementptr <4 x float>, <4 x float>* %src, i32 2
-  %l3 = load <4 x float>, <4 x float>* %s3, align 4
+  %l1 = load <4 x float>, ptr %src, align 4
+  %s2 = getelementptr <4 x float>, ptr %src, i32 1
+  %l2 = load <4 x float>, ptr %s2, align 4
+  %s3 = getelementptr <4 x float>, ptr %src, i32 2
+  %l3 = load <4 x float>, ptr %s3, align 4
   %t1 = shufflevector <4 x float> %l1, <4 x float> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x float> %l3, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <8 x float> %t1, <8 x float> %t2, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
-  store <12 x float> %s, <12 x float> *%dst
+  store <12 x float> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v8f32(<8 x float> *%src, <24 x float> *%dst) {
+define void @vst3_v8f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v8f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -1069,20 +1053,19 @@ define void @vst3_v8f32(<8 x float> *%src, <24 x float> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x float>, <8 x float>* %src, i32 0
-  %l1 = load <8 x float>, <8 x float>* %s1, align 4
-  %s2 = getelementptr <8 x float>, <8 x float>* %src, i32 1
-  %l2 = load <8 x float>, <8 x float>* %s2, align 4
-  %s3 = getelementptr <8 x float>, <8 x float>* %src, i32 2
-  %l3 = load <8 x float>, <8 x float>* %s3, align 4
+  %l1 = load <8 x float>, ptr %src, align 4
+  %s2 = getelementptr <8 x float>, ptr %src, i32 1
+  %l2 = load <8 x float>, ptr %s2, align 4
+  %s3 = getelementptr <8 x float>, ptr %src, i32 2
+  %l3 = load <8 x float>, ptr %s3, align 4
   %t1 = shufflevector <8 x float> %l1, <8 x float> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x float> %l3, <8 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <16 x float> %t1, <16 x float> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
-  store <24 x float> %s, <24 x float> *%dst
+  store <24 x float> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v16f32(<16 x float> *%src, <48 x float> *%dst) {
+define void @vst3_v16f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v16f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -1182,22 +1165,21 @@ define void @vst3_v16f32(<16 x float> *%src, <48 x float> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x float>, <16 x float>* %src, i32 0
-  %l1 = load <16 x float>, <16 x float>* %s1, align 4
-  %s2 = getelementptr <16 x float>, <16 x float>* %src, i32 1
-  %l2 = load <16 x float>, <16 x float>* %s2, align 4
-  %s3 = getelementptr <16 x float>, <16 x float>* %src, i32 2
-  %l3 = load <16 x float>, <16 x float>* %s3, align 4
+  %l1 = load <16 x float>, ptr %src, align 4
+  %s2 = getelementptr <16 x float>, ptr %src, i32 1
+  %l2 = load <16 x float>, ptr %s2, align 4
+  %s3 = getelementptr <16 x float>, ptr %src, i32 2
+  %l3 = load <16 x float>, ptr %s3, align 4
   %t1 = shufflevector <16 x float> %l1, <16 x float> %l2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %t2 = shufflevector <16 x float> %l3, <16 x float> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <32 x float> %t1, <32 x float> %t2, <48 x i32> <i32 0, i32 16, i32 32, i32 1, i32 17, i32 33, i32 2, i32 18, i32 34, i32 3, i32 19, i32 35, i32 4, i32 20, i32 36, i32 5, i32 21, i32 37, i32 6, i32 22, i32 38, i32 7, i32 23, i32 39, i32 8, i32 24, i32 40, i32 9, i32 25, i32 41, i32 10, i32 26, i32 42, i32 11, i32 27, i32 43, i32 12, i32 28, i32 44, i32 13, i32 29, i32 45, i32 14, i32 30, i32 46, i32 15, i32 31, i32 47>
-  store <48 x float> %s, <48 x float> *%dst
+  store <48 x float> %s, ptr %dst
   ret void
 }
 
 ; f16
 
-define void @vst3_v2f16(<2 x half> *%src, <6 x half> *%dst) {
+define void @vst3_v2f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v2f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldrd r2, r3, [r0]
@@ -1217,20 +1199,19 @@ define void @vst3_v2f16(<2 x half> *%src, <6 x half> *%dst) {
 ; CHECK-NEXT:    stm r1!, {r0, r2, r3}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x half>, <2 x half>* %src, i32 0
-  %l1 = load <2 x half>, <2 x half>* %s1, align 4
-  %s2 = getelementptr <2 x half>, <2 x half>* %src, i32 1
-  %l2 = load <2 x half>, <2 x half>* %s2, align 4
-  %s3 = getelementptr <2 x half>, <2 x half>* %src, i32 2
-  %l3 = load <2 x half>, <2 x half>* %s3, align 4
+  %l1 = load <2 x half>, ptr %src, align 4
+  %s2 = getelementptr <2 x half>, ptr %src, i32 1
+  %l2 = load <2 x half>, ptr %s2, align 4
+  %s3 = getelementptr <2 x half>, ptr %src, i32 2
+  %l3 = load <2 x half>, ptr %s3, align 4
   %t1 = shufflevector <2 x half> %l1, <2 x half> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x half> %l3, <2 x half> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %s = shufflevector <4 x half> %t1, <4 x half> %t2, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
-  store <6 x half> %s, <6 x half> *%dst
+  store <6 x half> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v4f16(<4 x half> *%src, <12 x half> *%dst) {
+define void @vst3_v4f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v4f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, lr}
@@ -1264,20 +1245,19 @@ define void @vst3_v4f16(<4 x half> *%src, <12 x half> *%dst) {
 ; CHECK-NEXT:    strd r0, r2, [r1, #16]
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
-  %s1 = getelementptr <4 x half>, <4 x half>* %src, i32 0
-  %l1 = load <4 x half>, <4 x half>* %s1, align 4
-  %s2 = getelementptr <4 x half>, <4 x half>* %src, i32 1
-  %l2 = load <4 x half>, <4 x half>* %s2, align 4
-  %s3 = getelementptr <4 x half>, <4 x half>* %src, i32 2
-  %l3 = load <4 x half>, <4 x half>* %s3, align 4
+  %l1 = load <4 x half>, ptr %src, align 4
+  %s2 = getelementptr <4 x half>, ptr %src, i32 1
+  %l2 = load <4 x half>, ptr %s2, align 4
+  %s3 = getelementptr <4 x half>, ptr %src, i32 2
+  %l3 = load <4 x half>, ptr %s3, align 4
   %t1 = shufflevector <4 x half> %l1, <4 x half> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x half> %l3, <4 x half> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <8 x half> %t1, <8 x half> %t2, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
-  store <12 x half> %s, <12 x half> *%dst
+  store <12 x half> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v8f16(<8 x half> *%src, <24 x half> *%dst) {
+define void @vst3_v8f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v8f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -1334,20 +1314,19 @@ define void @vst3_v8f16(<8 x half> *%src, <24 x half> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x half>, <8 x half>* %src, i32 0
-  %l1 = load <8 x half>, <8 x half>* %s1, align 4
-  %s2 = getelementptr <8 x half>, <8 x half>* %src, i32 1
-  %l2 = load <8 x half>, <8 x half>* %s2, align 4
-  %s3 = getelementptr <8 x half>, <8 x half>* %src, i32 2
-  %l3 = load <8 x half>, <8 x half>* %s3, align 4
+  %l1 = load <8 x half>, ptr %src, align 4
+  %s2 = getelementptr <8 x half>, ptr %src, i32 1
+  %l2 = load <8 x half>, ptr %s2, align 4
+  %s3 = getelementptr <8 x half>, ptr %src, i32 2
+  %l3 = load <8 x half>, ptr %s3, align 4
   %t1 = shufflevector <8 x half> %l1, <8 x half> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x half> %l3, <8 x half> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <16 x half> %t1, <16 x half> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
-  store <24 x half> %s, <24 x half> *%dst
+  store <24 x half> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v16f16(<16 x half> *%src, <48 x half> *%dst) {
+define void @vst3_v16f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v16f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -1477,22 +1456,21 @@ define void @vst3_v16f16(<16 x half> *%src, <48 x half> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x half>, <16 x half>* %src, i32 0
-  %l1 = load <16 x half>, <16 x half>* %s1, align 4
-  %s2 = getelementptr <16 x half>, <16 x half>* %src, i32 1
-  %l2 = load <16 x half>, <16 x half>* %s2, align 4
-  %s3 = getelementptr <16 x half>, <16 x half>* %src, i32 2
-  %l3 = load <16 x half>, <16 x half>* %s3, align 4
+  %l1 = load <16 x half>, ptr %src, align 4
+  %s2 = getelementptr <16 x half>, ptr %src, i32 1
+  %l2 = load <16 x half>, ptr %s2, align 4
+  %s3 = getelementptr <16 x half>, ptr %src, i32 2
+  %l3 = load <16 x half>, ptr %s3, align 4
   %t1 = shufflevector <16 x half> %l1, <16 x half> %l2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %t2 = shufflevector <16 x half> %l3, <16 x half> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <32 x half> %t1, <32 x half> %t2, <48 x i32> <i32 0, i32 16, i32 32, i32 1, i32 17, i32 33, i32 2, i32 18, i32 34, i32 3, i32 19, i32 35, i32 4, i32 20, i32 36, i32 5, i32 21, i32 37, i32 6, i32 22, i32 38, i32 7, i32 23, i32 39, i32 8, i32 24, i32 40, i32 9, i32 25, i32 41, i32 10, i32 26, i32 42, i32 11, i32 27, i32 43, i32 12, i32 28, i32 44, i32 13, i32 29, i32 45, i32 14, i32 30, i32 46, i32 15, i32 31, i32 47>
-  store <48 x half> %s, <48 x half> *%dst
+  store <48 x half> %s, ptr %dst
   ret void
 }
 
 ; f64
 
-define void @vst3_v2f64(<2 x double> *%src, <6 x double> *%dst) {
+define void @vst3_v2f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v2f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
@@ -1507,20 +1485,19 @@ define void @vst3_v2f64(<2 x double> *%src, <6 x double> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q1, [r1, #32]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x double>, <2 x double>* %src, i32 0
-  %l1 = load <2 x double>, <2 x double>* %s1, align 4
-  %s2 = getelementptr <2 x double>, <2 x double>* %src, i32 1
-  %l2 = load <2 x double>, <2 x double>* %s2, align 4
-  %s3 = getelementptr <2 x double>, <2 x double>* %src, i32 2
-  %l3 = load <2 x double>, <2 x double>* %s3, align 4
+  %l1 = load <2 x double>, ptr %src, align 4
+  %s2 = getelementptr <2 x double>, ptr %src, i32 1
+  %l2 = load <2 x double>, ptr %s2, align 4
+  %s3 = getelementptr <2 x double>, ptr %src, i32 2
+  %l3 = load <2 x double>, ptr %s3, align 4
   %t1 = shufflevector <2 x double> %l1, <2 x double> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x double> %l3, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %s = shufflevector <4 x double> %t1, <4 x double> %t2, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
-  store <6 x double> %s, <6 x double> *%dst
+  store <6 x double> %s, ptr %dst
   ret void
 }
 
-define void @vst3_v4f64(<4 x double> *%src, <12 x double> *%dst) {
+define void @vst3_v4f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst3_v4f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -1550,15 +1527,14 @@ define void @vst3_v4f64(<4 x double> *%src, <12 x double> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x double>, <4 x double>* %src, i32 0
-  %l1 = load <4 x double>, <4 x double>* %s1, align 4
-  %s2 = getelementptr <4 x double>, <4 x double>* %src, i32 1
-  %l2 = load <4 x double>, <4 x double>* %s2, align 4
-  %s3 = getelementptr <4 x double>, <4 x double>* %src, i32 2
-  %l3 = load <4 x double>, <4 x double>* %s3, align 4
+  %l1 = load <4 x double>, ptr %src, align 4
+  %s2 = getelementptr <4 x double>, ptr %src, i32 1
+  %l2 = load <4 x double>, ptr %s2, align 4
+  %s3 = getelementptr <4 x double>, ptr %src, i32 2
+  %l3 = load <4 x double>, ptr %s3, align 4
   %t1 = shufflevector <4 x double> %l1, <4 x double> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x double> %l3, <4 x double> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %s = shufflevector <8 x double> %t1, <8 x double> %t2, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
-  store <12 x double> %s, <12 x double> *%dst
+  store <12 x double> %s, ptr %dst
   ret void
 }

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vst4-post.ll b/llvm/test/CodeGen/Thumb2/mve-vst4-post.ll
index 5fe7f2f3d7d2f..869c9cb7afce8 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vst4-post.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vst4-post.ll
@@ -3,7 +3,7 @@
 
 ; i32
 
-define <16 x i32> *@vst4_v4i32(<4 x i32> *%src, <16 x i32> *%dst) {
+define ptr @vst4_v4i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
@@ -17,25 +17,24 @@ define <16 x i32> *@vst4_v4i32(<4 x i32> *%src, <16 x i32> *%dst) {
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x i32>, <4 x i32>* %src, i32 0
-  %l1 = load <4 x i32>, <4 x i32>* %s1, align 4
-  %s2 = getelementptr <4 x i32>, <4 x i32>* %src, i32 1
-  %l2 = load <4 x i32>, <4 x i32>* %s2, align 4
-  %s3 = getelementptr <4 x i32>, <4 x i32>* %src, i32 2
-  %l3 = load <4 x i32>, <4 x i32>* %s3, align 4
-  %s4 = getelementptr <4 x i32>, <4 x i32>* %src, i32 3
-  %l4 = load <4 x i32>, <4 x i32>* %s4, align 4
+  %l1 = load <4 x i32>, ptr %src, align 4
+  %s2 = getelementptr <4 x i32>, ptr %src, i32 1
+  %l2 = load <4 x i32>, ptr %s2, align 4
+  %s3 = getelementptr <4 x i32>, ptr %src, i32 2
+  %l3 = load <4 x i32>, ptr %s3, align 4
+  %s4 = getelementptr <4 x i32>, ptr %src, i32 3
+  %l4 = load <4 x i32>, ptr %s4, align 4
   %t1 = shufflevector <4 x i32> %l1, <4 x i32> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x i32> %l3, <4 x i32> %l4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s = shufflevector <8 x i32> %t1, <8 x i32> %t2, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
-  store <16 x i32> %s, <16 x i32> *%dst
-  %ret = getelementptr inbounds <16 x i32>, <16 x i32>* %dst, i32 1
-  ret <16 x i32> *%ret
+  store <16 x i32> %s, ptr %dst
+  %ret = getelementptr inbounds <16 x i32>, ptr %dst, i32 1
+  ret ptr %ret
 }
 
 ; i16
 
-define <32 x i16> *@vst4_v8i16(<8 x i16> *%src, <32 x i16> *%dst) {
+define ptr @vst4_v8i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
@@ -49,25 +48,24 @@ define <32 x i16> *@vst4_v8i16(<8 x i16> *%src, <32 x i16> *%dst) {
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x i16>, <8 x i16>* %src, i32 0
-  %l1 = load <8 x i16>, <8 x i16>* %s1, align 4
-  %s2 = getelementptr <8 x i16>, <8 x i16>* %src, i32 1
-  %l2 = load <8 x i16>, <8 x i16>* %s2, align 4
-  %s3 = getelementptr <8 x i16>, <8 x i16>* %src, i32 2
-  %l3 = load <8 x i16>, <8 x i16>* %s3, align 4
-  %s4 = getelementptr <8 x i16>, <8 x i16>* %src, i32 3
-  %l4 = load <8 x i16>, <8 x i16>* %s4, align 4
+  %l1 = load <8 x i16>, ptr %src, align 4
+  %s2 = getelementptr <8 x i16>, ptr %src, i32 1
+  %l2 = load <8 x i16>, ptr %s2, align 4
+  %s3 = getelementptr <8 x i16>, ptr %src, i32 2
+  %l3 = load <8 x i16>, ptr %s3, align 4
+  %s4 = getelementptr <8 x i16>, ptr %src, i32 3
+  %l4 = load <8 x i16>, ptr %s4, align 4
   %t1 = shufflevector <8 x i16> %l1, <8 x i16> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x i16> %l3, <8 x i16> %l4, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %s = shufflevector <16 x i16> %t1, <16 x i16> %t2, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
-  store <32 x i16> %s, <32 x i16> *%dst
-  %ret = getelementptr inbounds <32 x i16>, <32 x i16>* %dst, i32 1
-  ret <32 x i16> *%ret
+  store <32 x i16> %s, ptr %dst
+  %ret = getelementptr inbounds <32 x i16>, ptr %dst, i32 1
+  ret ptr %ret
 }
 
 ; i8
 
-define <64 x i8> *@vst4_v16i8(<16 x i8> *%src, <64 x i8> *%dst) {
+define ptr @vst4_v16i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
@@ -81,25 +79,24 @@ define <64 x i8> *@vst4_v16i8(<16 x i8> *%src, <64 x i8> *%dst) {
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x i8>, <16 x i8>* %src, i32 0
-  %l1 = load <16 x i8>, <16 x i8>* %s1, align 4
-  %s2 = getelementptr <16 x i8>, <16 x i8>* %src, i32 1
-  %l2 = load <16 x i8>, <16 x i8>* %s2, align 4
-  %s3 = getelementptr <16 x i8>, <16 x i8>* %src, i32 2
-  %l3 = load <16 x i8>, <16 x i8>* %s3, align 4
-  %s4 = getelementptr <16 x i8>, <16 x i8>* %src, i32 3
-  %l4 = load <16 x i8>, <16 x i8>* %s4, align 4
+  %l1 = load <16 x i8>, ptr %src, align 4
+  %s2 = getelementptr <16 x i8>, ptr %src, i32 1
+  %l2 = load <16 x i8>, ptr %s2, align 4
+  %s3 = getelementptr <16 x i8>, ptr %src, i32 2
+  %l3 = load <16 x i8>, ptr %s3, align 4
+  %s4 = getelementptr <16 x i8>, ptr %src, i32 3
+  %l4 = load <16 x i8>, ptr %s4, align 4
   %t1 = shufflevector <16 x i8> %l1, <16 x i8> %l2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %t2 = shufflevector <16 x i8> %l3, <16 x i8> %l4, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %s = shufflevector <32 x i8> %t1, <32 x i8> %t2, <64 x i32> <i32 0, i32 16, i32 32, i32 48, i32 1, i32 17, i32 33, i32 49, i32 2, i32 18, i32 34, i32 50, i32 3, i32 19, i32 35, i32 51, i32 4, i32 20, i32 36, i32 52, i32 5, i32 21, i32 37, i32 53, i32 6, i32 22, i32 38, i32 54, i32 7, i32 23, i32 39, i32 55, i32 8, i32 24, i32 40, i32 56, i32 9, i32 25, i32 41, i32 57, i32 10, i32 26, i32 42, i32 58, i32 11, i32 27, i32 43, i32 59, i32 12, i32 28, i32 44, i32 60, i32 13, i32 29, i32 45, i32 61, i32 14, i32 30, i32 46, i32 62, i32 15, i32 31, i32 47, i32 63>
-  store <64 x i8> %s, <64 x i8> *%dst
-  %ret = getelementptr inbounds <64 x i8>, <64 x i8>* %dst, i32 1
-  ret <64 x i8> *%ret
+  store <64 x i8> %s, ptr %dst
+  %ret = getelementptr inbounds <64 x i8>, ptr %dst, i32 1
+  ret ptr %ret
 }
 
 ; i64
 
-define <8 x i64> *@vst4_v2i64(<2 x i64> *%src, <8 x i64> *%dst) {
+define ptr @vst4_v2i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -122,25 +119,24 @@ define <8 x i64> *@vst4_v2i64(<2 x i64> *%src, <8 x i64> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x i64>, <2 x i64>* %src, i32 0
-  %l1 = load <2 x i64>, <2 x i64>* %s1, align 4
-  %s2 = getelementptr <2 x i64>, <2 x i64>* %src, i32 1
-  %l2 = load <2 x i64>, <2 x i64>* %s2, align 4
-  %s3 = getelementptr <2 x i64>, <2 x i64>* %src, i32 2
-  %l3 = load <2 x i64>, <2 x i64>* %s3, align 4
-  %s4 = getelementptr <2 x i64>, <2 x i64>* %src, i32 3
-  %l4 = load <2 x i64>, <2 x i64>* %s4, align 4
+  %l1 = load <2 x i64>, ptr %src, align 4
+  %s2 = getelementptr <2 x i64>, ptr %src, i32 1
+  %l2 = load <2 x i64>, ptr %s2, align 4
+  %s3 = getelementptr <2 x i64>, ptr %src, i32 2
+  %l3 = load <2 x i64>, ptr %s3, align 4
+  %s4 = getelementptr <2 x i64>, ptr %src, i32 3
+  %l4 = load <2 x i64>, ptr %s4, align 4
   %t1 = shufflevector <2 x i64> %l1, <2 x i64> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x i64> %l3, <2 x i64> %l4, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %s = shufflevector <4 x i64> %t1, <4 x i64> %t2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
-  store <8 x i64> %s, <8 x i64> *%dst
-  %ret = getelementptr inbounds <8 x i64>, <8 x i64>* %dst, i32 1
-  ret <8 x i64> *%ret
+  store <8 x i64> %s, ptr %dst
+  %ret = getelementptr inbounds <8 x i64>, ptr %dst, i32 1
+  ret ptr %ret
 }
 
 ; f32
 
-define <16 x float> *@vst4_v4f32(<4 x float> *%src, <16 x float> *%dst) {
+define ptr @vst4_v4f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
@@ -154,25 +150,24 @@ define <16 x float> *@vst4_v4f32(<4 x float> *%src, <16 x float> *%dst) {
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x float>, <4 x float>* %src, i32 0
-  %l1 = load <4 x float>, <4 x float>* %s1, align 4
-  %s2 = getelementptr <4 x float>, <4 x float>* %src, i32 1
-  %l2 = load <4 x float>, <4 x float>* %s2, align 4
-  %s3 = getelementptr <4 x float>, <4 x float>* %src, i32 2
-  %l3 = load <4 x float>, <4 x float>* %s3, align 4
-  %s4 = getelementptr <4 x float>, <4 x float>* %src, i32 3
-  %l4 = load <4 x float>, <4 x float>* %s4, align 4
+  %l1 = load <4 x float>, ptr %src, align 4
+  %s2 = getelementptr <4 x float>, ptr %src, i32 1
+  %l2 = load <4 x float>, ptr %s2, align 4
+  %s3 = getelementptr <4 x float>, ptr %src, i32 2
+  %l3 = load <4 x float>, ptr %s3, align 4
+  %s4 = getelementptr <4 x float>, ptr %src, i32 3
+  %l4 = load <4 x float>, ptr %s4, align 4
   %t1 = shufflevector <4 x float> %l1, <4 x float> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x float> %l3, <4 x float> %l4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s = shufflevector <8 x float> %t1, <8 x float> %t2, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
-  store <16 x float> %s, <16 x float> *%dst
-  %ret = getelementptr inbounds <16 x float>, <16 x float>* %dst, i32 1
-  ret <16 x float> *%ret
+  store <16 x float> %s, ptr %dst
+  %ret = getelementptr inbounds <16 x float>, ptr %dst, i32 1
+  ret ptr %ret
 }
 
 ; f16
 
-define <32 x half> *@vst4_v8f16(<8 x half> *%src, <32 x half> *%dst) {
+define ptr @vst4_v8f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v8f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
@@ -186,25 +181,24 @@ define <32 x half> *@vst4_v8f16(<8 x half> *%src, <32 x half> *%dst) {
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x half>, <8 x half>* %src, i32 0
-  %l1 = load <8 x half>, <8 x half>* %s1, align 4
-  %s2 = getelementptr <8 x half>, <8 x half>* %src, i32 1
-  %l2 = load <8 x half>, <8 x half>* %s2, align 4
-  %s3 = getelementptr <8 x half>, <8 x half>* %src, i32 2
-  %l3 = load <8 x half>, <8 x half>* %s3, align 4
-  %s4 = getelementptr <8 x half>, <8 x half>* %src, i32 3
-  %l4 = load <8 x half>, <8 x half>* %s4, align 4
+  %l1 = load <8 x half>, ptr %src, align 4
+  %s2 = getelementptr <8 x half>, ptr %src, i32 1
+  %l2 = load <8 x half>, ptr %s2, align 4
+  %s3 = getelementptr <8 x half>, ptr %src, i32 2
+  %l3 = load <8 x half>, ptr %s3, align 4
+  %s4 = getelementptr <8 x half>, ptr %src, i32 3
+  %l4 = load <8 x half>, ptr %s4, align 4
   %t1 = shufflevector <8 x half> %l1, <8 x half> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x half> %l3, <8 x half> %l4, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %s = shufflevector <16 x half> %t1, <16 x half> %t2, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
-  store <32 x half> %s, <32 x half> *%dst
-  %ret = getelementptr inbounds <32 x half>, <32 x half>* %dst, i32 1
-  ret <32 x half> *%ret
+  store <32 x half> %s, ptr %dst
+  %ret = getelementptr inbounds <32 x half>, ptr %dst, i32 1
+  ret ptr %ret
 }
 
 ; f64
 
-define <8 x double> *@vst4_v2f64(<2 x double> *%src, <8 x double> *%dst) {
+define ptr @vst4_v2f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v2f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9}
@@ -227,18 +221,17 @@ define <8 x double> *@vst4_v2f64(<2 x double> *%src, <8 x double> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x double>, <2 x double>* %src, i32 0
-  %l1 = load <2 x double>, <2 x double>* %s1, align 4
-  %s2 = getelementptr <2 x double>, <2 x double>* %src, i32 1
-  %l2 = load <2 x double>, <2 x double>* %s2, align 4
-  %s3 = getelementptr <2 x double>, <2 x double>* %src, i32 2
-  %l3 = load <2 x double>, <2 x double>* %s3, align 4
-  %s4 = getelementptr <2 x double>, <2 x double>* %src, i32 3
-  %l4 = load <2 x double>, <2 x double>* %s4, align 4
+  %l1 = load <2 x double>, ptr %src, align 4
+  %s2 = getelementptr <2 x double>, ptr %src, i32 1
+  %l2 = load <2 x double>, ptr %s2, align 4
+  %s3 = getelementptr <2 x double>, ptr %src, i32 2
+  %l3 = load <2 x double>, ptr %s3, align 4
+  %s4 = getelementptr <2 x double>, ptr %src, i32 3
+  %l4 = load <2 x double>, ptr %s4, align 4
   %t1 = shufflevector <2 x double> %l1, <2 x double> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x double> %l3, <2 x double> %l4, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %s = shufflevector <4 x double> %t1, <4 x double> %t2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
-  store <8 x double> %s, <8 x double> *%dst
-  %ret = getelementptr inbounds <8 x double>, <8 x double>* %dst, i32 1
-  ret <8 x double> *%ret
+  store <8 x double> %s, ptr %dst
+  %ret = getelementptr inbounds <8 x double>, ptr %dst, i32 1
+  ret ptr %ret
 }

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vst4.ll b/llvm/test/CodeGen/Thumb2/mve-vst4.ll
index 43ef891eab716..f3a65c40031af 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vst4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vst4.ll
@@ -3,7 +3,7 @@
 
 ; i32
 
-define void @vst4_v2i32(<2 x i32> *%src, <8 x i32> *%dst) {
+define void @vst4_v2i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v2i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -29,22 +29,21 @@ define void @vst4_v2i32(<2 x i32> *%src, <8 x i32> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %s1 = getelementptr <2 x i32>, <2 x i32>* %src, i32 0
-  %l1 = load <2 x i32>, <2 x i32>* %s1, align 4
-  %s2 = getelementptr <2 x i32>, <2 x i32>* %src, i32 1
-  %l2 = load <2 x i32>, <2 x i32>* %s2, align 4
-  %s3 = getelementptr <2 x i32>, <2 x i32>* %src, i32 2
-  %l3 = load <2 x i32>, <2 x i32>* %s3, align 4
-  %s4 = getelementptr <2 x i32>, <2 x i32>* %src, i32 3
-  %l4 = load <2 x i32>, <2 x i32>* %s4, align 4
+  %l1 = load <2 x i32>, ptr %src, align 4
+  %s2 = getelementptr <2 x i32>, ptr %src, i32 1
+  %l2 = load <2 x i32>, ptr %s2, align 4
+  %s3 = getelementptr <2 x i32>, ptr %src, i32 2
+  %l3 = load <2 x i32>, ptr %s3, align 4
+  %s4 = getelementptr <2 x i32>, ptr %src, i32 3
+  %l4 = load <2 x i32>, ptr %s4, align 4
   %t1 = shufflevector <2 x i32> %l1, <2 x i32> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x i32> %l3, <2 x i32> %l4, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %s = shufflevector <4 x i32> %t1, <4 x i32> %t2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
-  store <8 x i32> %s, <8 x i32> *%dst, align 4
+  store <8 x i32> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst4_v4i32(<4 x i32> *%src, <16 x i32> *%dst) {
+define void @vst4_v4i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
@@ -57,22 +56,21 @@ define void @vst4_v4i32(<4 x i32> *%src, <16 x i32> *%dst) {
 ; CHECK-NEXT:    vst43.32 {q0, q1, q2, q3}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x i32>, <4 x i32>* %src, i32 0
-  %l1 = load <4 x i32>, <4 x i32>* %s1, align 4
-  %s2 = getelementptr <4 x i32>, <4 x i32>* %src, i32 1
-  %l2 = load <4 x i32>, <4 x i32>* %s2, align 4
-  %s3 = getelementptr <4 x i32>, <4 x i32>* %src, i32 2
-  %l3 = load <4 x i32>, <4 x i32>* %s3, align 4
-  %s4 = getelementptr <4 x i32>, <4 x i32>* %src, i32 3
-  %l4 = load <4 x i32>, <4 x i32>* %s4, align 4
+  %l1 = load <4 x i32>, ptr %src, align 4
+  %s2 = getelementptr <4 x i32>, ptr %src, i32 1
+  %l2 = load <4 x i32>, ptr %s2, align 4
+  %s3 = getelementptr <4 x i32>, ptr %src, i32 2
+  %l3 = load <4 x i32>, ptr %s3, align 4
+  %s4 = getelementptr <4 x i32>, ptr %src, i32 3
+  %l4 = load <4 x i32>, ptr %s4, align 4
   %t1 = shufflevector <4 x i32> %l1, <4 x i32> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x i32> %l3, <4 x i32> %l4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s = shufflevector <8 x i32> %t1, <8 x i32> %t2, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
-  store <16 x i32> %s, <16 x i32> *%dst, align 4
+  store <16 x i32> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst4_v8i32(<8 x i32> *%src, <32 x i32> *%dst) {
+define void @vst4_v8i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v8i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -96,22 +94,21 @@ define void @vst4_v8i32(<8 x i32> *%src, <32 x i32> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x i32>, <8 x i32>* %src, i32 0
-  %l1 = load <8 x i32>, <8 x i32>* %s1, align 4
-  %s2 = getelementptr <8 x i32>, <8 x i32>* %src, i32 1
-  %l2 = load <8 x i32>, <8 x i32>* %s2, align 4
-  %s3 = getelementptr <8 x i32>, <8 x i32>* %src, i32 2
-  %l3 = load <8 x i32>, <8 x i32>* %s3, align 4
-  %s4 = getelementptr <8 x i32>, <8 x i32>* %src, i32 3
-  %l4 = load <8 x i32>, <8 x i32>* %s4, align 4
+  %l1 = load <8 x i32>, ptr %src, align 4
+  %s2 = getelementptr <8 x i32>, ptr %src, i32 1
+  %l2 = load <8 x i32>, ptr %s2, align 4
+  %s3 = getelementptr <8 x i32>, ptr %src, i32 2
+  %l3 = load <8 x i32>, ptr %s3, align 4
+  %s4 = getelementptr <8 x i32>, ptr %src, i32 3
+  %l4 = load <8 x i32>, ptr %s4, align 4
   %t1 = shufflevector <8 x i32> %l1, <8 x i32> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x i32> %l3, <8 x i32> %l4, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %s = shufflevector <16 x i32> %t1, <16 x i32> %t2, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
-  store <32 x i32> %s, <32 x i32> *%dst, align 4
+  store <32 x i32> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst4_v16i32(<16 x i32> *%src, <64 x i32> *%dst) {
+define void @vst4_v16i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v16i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5}
@@ -175,22 +172,21 @@ define void @vst4_v16i32(<16 x i32> *%src, <64 x i32> *%dst) {
 ; CHECK-NEXT:    pop {r4, r5}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x i32>, <16 x i32>* %src, i32 0
-  %l1 = load <16 x i32>, <16 x i32>* %s1, align 4
-  %s2 = getelementptr <16 x i32>, <16 x i32>* %src, i32 1
-  %l2 = load <16 x i32>, <16 x i32>* %s2, align 4
-  %s3 = getelementptr <16 x i32>, <16 x i32>* %src, i32 2
-  %l3 = load <16 x i32>, <16 x i32>* %s3, align 4
-  %s4 = getelementptr <16 x i32>, <16 x i32>* %src, i32 3
-  %l4 = load <16 x i32>, <16 x i32>* %s4, align 4
+  %l1 = load <16 x i32>, ptr %src, align 4
+  %s2 = getelementptr <16 x i32>, ptr %src, i32 1
+  %l2 = load <16 x i32>, ptr %s2, align 4
+  %s3 = getelementptr <16 x i32>, ptr %src, i32 2
+  %l3 = load <16 x i32>, ptr %s3, align 4
+  %s4 = getelementptr <16 x i32>, ptr %src, i32 3
+  %l4 = load <16 x i32>, ptr %s4, align 4
   %t1 = shufflevector <16 x i32> %l1, <16 x i32> %l2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %t2 = shufflevector <16 x i32> %l3, <16 x i32> %l4, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %s = shufflevector <32 x i32> %t1, <32 x i32> %t2, <64 x i32> <i32 0, i32 16, i32 32, i32 48, i32 1, i32 17, i32 33, i32 49, i32 2, i32 18, i32 34, i32 50, i32 3, i32 19, i32 35, i32 51, i32 4, i32 20, i32 36, i32 52, i32 5, i32 21, i32 37, i32 53, i32 6, i32 22, i32 38, i32 54, i32 7, i32 23, i32 39, i32 55, i32 8, i32 24, i32 40, i32 56, i32 9, i32 25, i32 41, i32 57, i32 10, i32 26, i32 42, i32 58, i32 11, i32 27, i32 43, i32 59, i32 12, i32 28, i32 44, i32 60, i32 13, i32 29, i32 45, i32 61, i32 14, i32 30, i32 46, i32 62, i32 15, i32 31, i32 47, i32 63>
-  store <64 x i32> %s, <64 x i32> *%dst, align 4
+  store <64 x i32> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst4_v4i32_align1(<4 x i32> *%src, <16 x i32> *%dst) {
+define void @vst4_v4i32_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v4i32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
@@ -221,24 +217,23 @@ define void @vst4_v4i32_align1(<4 x i32> *%src, <16 x i32> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x i32>, <4 x i32>* %src, i32 0
-  %l1 = load <4 x i32>, <4 x i32>* %s1, align 4
-  %s2 = getelementptr <4 x i32>, <4 x i32>* %src, i32 1
-  %l2 = load <4 x i32>, <4 x i32>* %s2, align 4
-  %s3 = getelementptr <4 x i32>, <4 x i32>* %src, i32 2
-  %l3 = load <4 x i32>, <4 x i32>* %s3, align 4
-  %s4 = getelementptr <4 x i32>, <4 x i32>* %src, i32 3
-  %l4 = load <4 x i32>, <4 x i32>* %s4, align 4
+  %l1 = load <4 x i32>, ptr %src, align 4
+  %s2 = getelementptr <4 x i32>, ptr %src, i32 1
+  %l2 = load <4 x i32>, ptr %s2, align 4
+  %s3 = getelementptr <4 x i32>, ptr %src, i32 2
+  %l3 = load <4 x i32>, ptr %s3, align 4
+  %s4 = getelementptr <4 x i32>, ptr %src, i32 3
+  %l4 = load <4 x i32>, ptr %s4, align 4
   %t1 = shufflevector <4 x i32> %l1, <4 x i32> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x i32> %l3, <4 x i32> %l4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s = shufflevector <8 x i32> %t1, <8 x i32> %t2, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
-  store <16 x i32> %s, <16 x i32> *%dst, align 1
+  store <16 x i32> %s, ptr %dst, align 1
   ret void
 }
 
 ; i16
 
-define void @vst4_v2i16(<2 x i16> *%src, <8 x i16> *%dst) {
+define void @vst4_v2i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v2i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -266,22 +261,21 @@ define void @vst4_v2i16(<2 x i16> *%src, <8 x i16> *%dst) {
 ; CHECK-NEXT:    vstrh.16 q1, [r1]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %s1 = getelementptr <2 x i16>, <2 x i16>* %src, i32 0
-  %l1 = load <2 x i16>, <2 x i16>* %s1, align 4
-  %s2 = getelementptr <2 x i16>, <2 x i16>* %src, i32 1
-  %l2 = load <2 x i16>, <2 x i16>* %s2, align 4
-  %s3 = getelementptr <2 x i16>, <2 x i16>* %src, i32 2
-  %l3 = load <2 x i16>, <2 x i16>* %s3, align 4
-  %s4 = getelementptr <2 x i16>, <2 x i16>* %src, i32 3
-  %l4 = load <2 x i16>, <2 x i16>* %s4, align 4
+  %l1 = load <2 x i16>, ptr %src, align 4
+  %s2 = getelementptr <2 x i16>, ptr %src, i32 1
+  %l2 = load <2 x i16>, ptr %s2, align 4
+  %s3 = getelementptr <2 x i16>, ptr %src, i32 2
+  %l3 = load <2 x i16>, ptr %s3, align 4
+  %s4 = getelementptr <2 x i16>, ptr %src, i32 3
+  %l4 = load <2 x i16>, ptr %s4, align 4
   %t1 = shufflevector <2 x i16> %l1, <2 x i16> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x i16> %l3, <2 x i16> %l4, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %s = shufflevector <4 x i16> %t1, <4 x i16> %t2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
-  store <8 x i16> %s, <8 x i16> *%dst, align 2
+  store <8 x i16> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst4_v4i16(<4 x i16> *%src, <16 x i16> *%dst) {
+define void @vst4_v4i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -321,22 +315,21 @@ define void @vst4_v4i16(<4 x i16> *%src, <16 x i16> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %s1 = getelementptr <4 x i16>, <4 x i16>* %src, i32 0
-  %l1 = load <4 x i16>, <4 x i16>* %s1, align 4
-  %s2 = getelementptr <4 x i16>, <4 x i16>* %src, i32 1
-  %l2 = load <4 x i16>, <4 x i16>* %s2, align 4
-  %s3 = getelementptr <4 x i16>, <4 x i16>* %src, i32 2
-  %l3 = load <4 x i16>, <4 x i16>* %s3, align 4
-  %s4 = getelementptr <4 x i16>, <4 x i16>* %src, i32 3
-  %l4 = load <4 x i16>, <4 x i16>* %s4, align 4
+  %l1 = load <4 x i16>, ptr %src, align 4
+  %s2 = getelementptr <4 x i16>, ptr %src, i32 1
+  %l2 = load <4 x i16>, ptr %s2, align 4
+  %s3 = getelementptr <4 x i16>, ptr %src, i32 2
+  %l3 = load <4 x i16>, ptr %s3, align 4
+  %s4 = getelementptr <4 x i16>, ptr %src, i32 3
+  %l4 = load <4 x i16>, ptr %s4, align 4
   %t1 = shufflevector <4 x i16> %l1, <4 x i16> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x i16> %l3, <4 x i16> %l4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s = shufflevector <8 x i16> %t1, <8 x i16> %t2, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
-  store <16 x i16> %s, <16 x i16> *%dst, align 2
+  store <16 x i16> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst4_v8i16(<8 x i16> *%src, <32 x i16> *%dst) {
+define void @vst4_v8i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
@@ -349,22 +342,21 @@ define void @vst4_v8i16(<8 x i16> *%src, <32 x i16> *%dst) {
 ; CHECK-NEXT:    vst43.16 {q0, q1, q2, q3}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x i16>, <8 x i16>* %src, i32 0
-  %l1 = load <8 x i16>, <8 x i16>* %s1, align 4
-  %s2 = getelementptr <8 x i16>, <8 x i16>* %src, i32 1
-  %l2 = load <8 x i16>, <8 x i16>* %s2, align 4
-  %s3 = getelementptr <8 x i16>, <8 x i16>* %src, i32 2
-  %l3 = load <8 x i16>, <8 x i16>* %s3, align 4
-  %s4 = getelementptr <8 x i16>, <8 x i16>* %src, i32 3
-  %l4 = load <8 x i16>, <8 x i16>* %s4, align 4
+  %l1 = load <8 x i16>, ptr %src, align 4
+  %s2 = getelementptr <8 x i16>, ptr %src, i32 1
+  %l2 = load <8 x i16>, ptr %s2, align 4
+  %s3 = getelementptr <8 x i16>, ptr %src, i32 2
+  %l3 = load <8 x i16>, ptr %s3, align 4
+  %s4 = getelementptr <8 x i16>, ptr %src, i32 3
+  %l4 = load <8 x i16>, ptr %s4, align 4
   %t1 = shufflevector <8 x i16> %l1, <8 x i16> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x i16> %l3, <8 x i16> %l4, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %s = shufflevector <16 x i16> %t1, <16 x i16> %t2, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
-  store <32 x i16> %s, <32 x i16> *%dst, align 2
+  store <32 x i16> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst4_v16i16(<16 x i16> *%src, <64 x i16> *%dst) {
+define void @vst4_v16i16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v16i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -388,22 +380,21 @@ define void @vst4_v16i16(<16 x i16> *%src, <64 x i16> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x i16>, <16 x i16>* %src, i32 0
-  %l1 = load <16 x i16>, <16 x i16>* %s1, align 4
-  %s2 = getelementptr <16 x i16>, <16 x i16>* %src, i32 1
-  %l2 = load <16 x i16>, <16 x i16>* %s2, align 4
-  %s3 = getelementptr <16 x i16>, <16 x i16>* %src, i32 2
-  %l3 = load <16 x i16>, <16 x i16>* %s3, align 4
-  %s4 = getelementptr <16 x i16>, <16 x i16>* %src, i32 3
-  %l4 = load <16 x i16>, <16 x i16>* %s4, align 4
+  %l1 = load <16 x i16>, ptr %src, align 4
+  %s2 = getelementptr <16 x i16>, ptr %src, i32 1
+  %l2 = load <16 x i16>, ptr %s2, align 4
+  %s3 = getelementptr <16 x i16>, ptr %src, i32 2
+  %l3 = load <16 x i16>, ptr %s3, align 4
+  %s4 = getelementptr <16 x i16>, ptr %src, i32 3
+  %l4 = load <16 x i16>, ptr %s4, align 4
   %t1 = shufflevector <16 x i16> %l1, <16 x i16> %l2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %t2 = shufflevector <16 x i16> %l3, <16 x i16> %l4, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %s = shufflevector <32 x i16> %t1, <32 x i16> %t2, <64 x i32> <i32 0, i32 16, i32 32, i32 48, i32 1, i32 17, i32 33, i32 49, i32 2, i32 18, i32 34, i32 50, i32 3, i32 19, i32 35, i32 51, i32 4, i32 20, i32 36, i32 52, i32 5, i32 21, i32 37, i32 53, i32 6, i32 22, i32 38, i32 54, i32 7, i32 23, i32 39, i32 55, i32 8, i32 24, i32 40, i32 56, i32 9, i32 25, i32 41, i32 57, i32 10, i32 26, i32 42, i32 58, i32 11, i32 27, i32 43, i32 59, i32 12, i32 28, i32 44, i32 60, i32 13, i32 29, i32 45, i32 61, i32 14, i32 30, i32 46, i32 62, i32 15, i32 31, i32 47, i32 63>
-  store <64 x i16> %s, <64 x i16> *%dst, align 2
+  store <64 x i16> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst4_v8i16_align1(<8 x i16> *%src, <32 x i16> *%dst) {
+define void @vst4_v8i16_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v8i16_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
@@ -463,24 +454,23 @@ define void @vst4_v8i16_align1(<8 x i16> *%src, <32 x i16> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x i16>, <8 x i16>* %src, i32 0
-  %l1 = load <8 x i16>, <8 x i16>* %s1, align 4
-  %s2 = getelementptr <8 x i16>, <8 x i16>* %src, i32 1
-  %l2 = load <8 x i16>, <8 x i16>* %s2, align 4
-  %s3 = getelementptr <8 x i16>, <8 x i16>* %src, i32 2
-  %l3 = load <8 x i16>, <8 x i16>* %s3, align 4
-  %s4 = getelementptr <8 x i16>, <8 x i16>* %src, i32 3
-  %l4 = load <8 x i16>, <8 x i16>* %s4, align 4
+  %l1 = load <8 x i16>, ptr %src, align 4
+  %s2 = getelementptr <8 x i16>, ptr %src, i32 1
+  %l2 = load <8 x i16>, ptr %s2, align 4
+  %s3 = getelementptr <8 x i16>, ptr %src, i32 2
+  %l3 = load <8 x i16>, ptr %s3, align 4
+  %s4 = getelementptr <8 x i16>, ptr %src, i32 3
+  %l4 = load <8 x i16>, ptr %s4, align 4
   %t1 = shufflevector <8 x i16> %l1, <8 x i16> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x i16> %l3, <8 x i16> %l4, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %s = shufflevector <16 x i16> %t1, <16 x i16> %t2, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
-  store <32 x i16> %s, <32 x i16> *%dst, align 1
+  store <32 x i16> %s, ptr %dst, align 1
   ret void
 }
 
 ; i8
 
-define void @vst4_v2i8(<2 x i8> *%src, <8 x i8> *%dst) {
+define void @vst4_v2i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v2i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -508,22 +498,21 @@ define void @vst4_v2i8(<2 x i8> *%src, <8 x i8> *%dst) {
 ; CHECK-NEXT:    vstrb.16 q0, [r1]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %s1 = getelementptr <2 x i8>, <2 x i8>* %src, i32 0
-  %l1 = load <2 x i8>, <2 x i8>* %s1, align 4
-  %s2 = getelementptr <2 x i8>, <2 x i8>* %src, i32 1
-  %l2 = load <2 x i8>, <2 x i8>* %s2, align 4
-  %s3 = getelementptr <2 x i8>, <2 x i8>* %src, i32 2
-  %l3 = load <2 x i8>, <2 x i8>* %s3, align 4
-  %s4 = getelementptr <2 x i8>, <2 x i8>* %src, i32 3
-  %l4 = load <2 x i8>, <2 x i8>* %s4, align 4
+  %l1 = load <2 x i8>, ptr %src, align 4
+  %s2 = getelementptr <2 x i8>, ptr %src, i32 1
+  %l2 = load <2 x i8>, ptr %s2, align 4
+  %s3 = getelementptr <2 x i8>, ptr %src, i32 2
+  %l3 = load <2 x i8>, ptr %s3, align 4
+  %s4 = getelementptr <2 x i8>, ptr %src, i32 3
+  %l4 = load <2 x i8>, ptr %s4, align 4
   %t1 = shufflevector <2 x i8> %l1, <2 x i8> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x i8> %l3, <2 x i8> %l4, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %s = shufflevector <4 x i8> %t1, <4 x i8> %t2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
-  store <8 x i8> %s, <8 x i8> *%dst, align 1
+  store <8 x i8> %s, ptr %dst, align 1
   ret void
 }
 
-define void @vst4_v4i8(<4 x i8> *%src, <16 x i8> *%dst) {
+define void @vst4_v4i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v4i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, r7, lr}
@@ -565,22 +554,21 @@ define void @vst4_v4i8(<4 x i8> *%src, <16 x i8> *%dst) {
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, pc}
 entry:
-  %s1 = getelementptr <4 x i8>, <4 x i8>* %src, i32 0
-  %l1 = load <4 x i8>, <4 x i8>* %s1, align 4
-  %s2 = getelementptr <4 x i8>, <4 x i8>* %src, i32 1
-  %l2 = load <4 x i8>, <4 x i8>* %s2, align 4
-  %s3 = getelementptr <4 x i8>, <4 x i8>* %src, i32 2
-  %l3 = load <4 x i8>, <4 x i8>* %s3, align 4
-  %s4 = getelementptr <4 x i8>, <4 x i8>* %src, i32 3
-  %l4 = load <4 x i8>, <4 x i8>* %s4, align 4
+  %l1 = load <4 x i8>, ptr %src, align 4
+  %s2 = getelementptr <4 x i8>, ptr %src, i32 1
+  %l2 = load <4 x i8>, ptr %s2, align 4
+  %s3 = getelementptr <4 x i8>, ptr %src, i32 2
+  %l3 = load <4 x i8>, ptr %s3, align 4
+  %s4 = getelementptr <4 x i8>, ptr %src, i32 3
+  %l4 = load <4 x i8>, ptr %s4, align 4
   %t1 = shufflevector <4 x i8> %l1, <4 x i8> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x i8> %l3, <4 x i8> %l4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s = shufflevector <8 x i8> %t1, <8 x i8> %t2, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
-  store <16 x i8> %s, <16 x i8> *%dst, align 1
+  store <16 x i8> %s, ptr %dst, align 1
   ret void
 }
 
-define void @vst4_v8i8(<8 x i8> *%src, <32 x i8> *%dst) {
+define void @vst4_v8i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -658,22 +646,21 @@ define void @vst4_v8i8(<8 x i8> *%src, <32 x i8> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x i8>, <8 x i8>* %src, i32 0
-  %l1 = load <8 x i8>, <8 x i8>* %s1, align 4
-  %s2 = getelementptr <8 x i8>, <8 x i8>* %src, i32 1
-  %l2 = load <8 x i8>, <8 x i8>* %s2, align 4
-  %s3 = getelementptr <8 x i8>, <8 x i8>* %src, i32 2
-  %l3 = load <8 x i8>, <8 x i8>* %s3, align 4
-  %s4 = getelementptr <8 x i8>, <8 x i8>* %src, i32 3
-  %l4 = load <8 x i8>, <8 x i8>* %s4, align 4
+  %l1 = load <8 x i8>, ptr %src, align 4
+  %s2 = getelementptr <8 x i8>, ptr %src, i32 1
+  %l2 = load <8 x i8>, ptr %s2, align 4
+  %s3 = getelementptr <8 x i8>, ptr %src, i32 2
+  %l3 = load <8 x i8>, ptr %s3, align 4
+  %s4 = getelementptr <8 x i8>, ptr %src, i32 3
+  %l4 = load <8 x i8>, ptr %s4, align 4
   %t1 = shufflevector <8 x i8> %l1, <8 x i8> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x i8> %l3, <8 x i8> %l4, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %s = shufflevector <16 x i8> %t1, <16 x i8> %t2, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
-  store <32 x i8> %s, <32 x i8> *%dst, align 1
+  store <32 x i8> %s, ptr %dst, align 1
   ret void
 }
 
-define void @vst4_v16i8(<16 x i8> *%src, <64 x i8> *%dst) {
+define void @vst4_v16i8(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
@@ -686,24 +673,23 @@ define void @vst4_v16i8(<16 x i8> *%src, <64 x i8> *%dst) {
 ; CHECK-NEXT:    vst43.8 {q0, q1, q2, q3}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x i8>, <16 x i8>* %src, i32 0
-  %l1 = load <16 x i8>, <16 x i8>* %s1, align 4
-  %s2 = getelementptr <16 x i8>, <16 x i8>* %src, i32 1
-  %l2 = load <16 x i8>, <16 x i8>* %s2, align 4
-  %s3 = getelementptr <16 x i8>, <16 x i8>* %src, i32 2
-  %l3 = load <16 x i8>, <16 x i8>* %s3, align 4
-  %s4 = getelementptr <16 x i8>, <16 x i8>* %src, i32 3
-  %l4 = load <16 x i8>, <16 x i8>* %s4, align 4
+  %l1 = load <16 x i8>, ptr %src, align 4
+  %s2 = getelementptr <16 x i8>, ptr %src, i32 1
+  %l2 = load <16 x i8>, ptr %s2, align 4
+  %s3 = getelementptr <16 x i8>, ptr %src, i32 2
+  %l3 = load <16 x i8>, ptr %s3, align 4
+  %s4 = getelementptr <16 x i8>, ptr %src, i32 3
+  %l4 = load <16 x i8>, ptr %s4, align 4
   %t1 = shufflevector <16 x i8> %l1, <16 x i8> %l2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %t2 = shufflevector <16 x i8> %l3, <16 x i8> %l4, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %s = shufflevector <32 x i8> %t1, <32 x i8> %t2, <64 x i32> <i32 0, i32 16, i32 32, i32 48, i32 1, i32 17, i32 33, i32 49, i32 2, i32 18, i32 34, i32 50, i32 3, i32 19, i32 35, i32 51, i32 4, i32 20, i32 36, i32 52, i32 5, i32 21, i32 37, i32 53, i32 6, i32 22, i32 38, i32 54, i32 7, i32 23, i32 39, i32 55, i32 8, i32 24, i32 40, i32 56, i32 9, i32 25, i32 41, i32 57, i32 10, i32 26, i32 42, i32 58, i32 11, i32 27, i32 43, i32 59, i32 12, i32 28, i32 44, i32 60, i32 13, i32 29, i32 45, i32 61, i32 14, i32 30, i32 46, i32 62, i32 15, i32 31, i32 47, i32 63>
-  store <64 x i8> %s, <64 x i8> *%dst, align 1
+  store <64 x i8> %s, ptr %dst, align 1
   ret void
 }
 
 ; i64
 
-define void @vst4_v2i64(<2 x i64> *%src, <8 x i64> *%dst) {
+define void @vst4_v2i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -725,22 +711,21 @@ define void @vst4_v2i64(<2 x i64> *%src, <8 x i64> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x i64>, <2 x i64>* %src, i32 0
-  %l1 = load <2 x i64>, <2 x i64>* %s1, align 4
-  %s2 = getelementptr <2 x i64>, <2 x i64>* %src, i32 1
-  %l2 = load <2 x i64>, <2 x i64>* %s2, align 4
-  %s3 = getelementptr <2 x i64>, <2 x i64>* %src, i32 2
-  %l3 = load <2 x i64>, <2 x i64>* %s3, align 4
-  %s4 = getelementptr <2 x i64>, <2 x i64>* %src, i32 3
-  %l4 = load <2 x i64>, <2 x i64>* %s4, align 4
+  %l1 = load <2 x i64>, ptr %src, align 4
+  %s2 = getelementptr <2 x i64>, ptr %src, i32 1
+  %l2 = load <2 x i64>, ptr %s2, align 4
+  %s3 = getelementptr <2 x i64>, ptr %src, i32 2
+  %l3 = load <2 x i64>, ptr %s3, align 4
+  %s4 = getelementptr <2 x i64>, ptr %src, i32 3
+  %l4 = load <2 x i64>, ptr %s4, align 4
   %t1 = shufflevector <2 x i64> %l1, <2 x i64> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x i64> %l3, <2 x i64> %l4, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %s = shufflevector <4 x i64> %t1, <4 x i64> %t2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
-  store <8 x i64> %s, <8 x i64> *%dst, align 8
+  store <8 x i64> %s, ptr %dst, align 8
   ret void
 }
 
-define void @vst4_v4i64(<4 x i64> *%src, <16 x i64> *%dst) {
+define void @vst4_v4i64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v4i64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -787,24 +772,23 @@ define void @vst4_v4i64(<4 x i64> *%src, <16 x i64> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x i64>, <4 x i64>* %src, i32 0
-  %l1 = load <4 x i64>, <4 x i64>* %s1, align 4
-  %s2 = getelementptr <4 x i64>, <4 x i64>* %src, i32 1
-  %l2 = load <4 x i64>, <4 x i64>* %s2, align 4
-  %s3 = getelementptr <4 x i64>, <4 x i64>* %src, i32 2
-  %l3 = load <4 x i64>, <4 x i64>* %s3, align 4
-  %s4 = getelementptr <4 x i64>, <4 x i64>* %src, i32 3
-  %l4 = load <4 x i64>, <4 x i64>* %s4, align 4
+  %l1 = load <4 x i64>, ptr %src, align 4
+  %s2 = getelementptr <4 x i64>, ptr %src, i32 1
+  %l2 = load <4 x i64>, ptr %s2, align 4
+  %s3 = getelementptr <4 x i64>, ptr %src, i32 2
+  %l3 = load <4 x i64>, ptr %s3, align 4
+  %s4 = getelementptr <4 x i64>, ptr %src, i32 3
+  %l4 = load <4 x i64>, ptr %s4, align 4
   %t1 = shufflevector <4 x i64> %l1, <4 x i64> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x i64> %l3, <4 x i64> %l4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s = shufflevector <8 x i64> %t1, <8 x i64> %t2, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
-  store <16 x i64> %s, <16 x i64> *%dst, align 8
+  store <16 x i64> %s, ptr %dst, align 8
   ret void
 }
 
 ; f32
 
-define void @vst4_v2f32(<2 x float> *%src, <8 x float> *%dst) {
+define void @vst4_v2f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v2f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldr s0, [r0]
@@ -819,22 +803,21 @@ define void @vst4_v2f32(<2 x float> *%src, <8 x float> *%dst) {
 ; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x float>, <2 x float>* %src, i32 0
-  %l1 = load <2 x float>, <2 x float>* %s1, align 4
-  %s2 = getelementptr <2 x float>, <2 x float>* %src, i32 1
-  %l2 = load <2 x float>, <2 x float>* %s2, align 4
-  %s3 = getelementptr <2 x float>, <2 x float>* %src, i32 2
-  %l3 = load <2 x float>, <2 x float>* %s3, align 4
-  %s4 = getelementptr <2 x float>, <2 x float>* %src, i32 3
-  %l4 = load <2 x float>, <2 x float>* %s4, align 4
+  %l1 = load <2 x float>, ptr %src, align 4
+  %s2 = getelementptr <2 x float>, ptr %src, i32 1
+  %l2 = load <2 x float>, ptr %s2, align 4
+  %s3 = getelementptr <2 x float>, ptr %src, i32 2
+  %l3 = load <2 x float>, ptr %s3, align 4
+  %s4 = getelementptr <2 x float>, ptr %src, i32 3
+  %l4 = load <2 x float>, ptr %s4, align 4
   %t1 = shufflevector <2 x float> %l1, <2 x float> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x float> %l3, <2 x float> %l4, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %s = shufflevector <4 x float> %t1, <4 x float> %t2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
-  store <8 x float> %s, <8 x float> *%dst, align 4
+  store <8 x float> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst4_v4f32(<4 x float> *%src, <16 x float> *%dst) {
+define void @vst4_v4f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
@@ -847,22 +830,21 @@ define void @vst4_v4f32(<4 x float> *%src, <16 x float> *%dst) {
 ; CHECK-NEXT:    vst43.32 {q0, q1, q2, q3}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x float>, <4 x float>* %src, i32 0
-  %l1 = load <4 x float>, <4 x float>* %s1, align 4
-  %s2 = getelementptr <4 x float>, <4 x float>* %src, i32 1
-  %l2 = load <4 x float>, <4 x float>* %s2, align 4
-  %s3 = getelementptr <4 x float>, <4 x float>* %src, i32 2
-  %l3 = load <4 x float>, <4 x float>* %s3, align 4
-  %s4 = getelementptr <4 x float>, <4 x float>* %src, i32 3
-  %l4 = load <4 x float>, <4 x float>* %s4, align 4
+  %l1 = load <4 x float>, ptr %src, align 4
+  %s2 = getelementptr <4 x float>, ptr %src, i32 1
+  %l2 = load <4 x float>, ptr %s2, align 4
+  %s3 = getelementptr <4 x float>, ptr %src, i32 2
+  %l3 = load <4 x float>, ptr %s3, align 4
+  %s4 = getelementptr <4 x float>, ptr %src, i32 3
+  %l4 = load <4 x float>, ptr %s4, align 4
   %t1 = shufflevector <4 x float> %l1, <4 x float> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x float> %l3, <4 x float> %l4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s = shufflevector <8 x float> %t1, <8 x float> %t2, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
-  store <16 x float> %s, <16 x float> *%dst, align 4
+  store <16 x float> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst4_v8f32(<8 x float> *%src, <32 x float> *%dst) {
+define void @vst4_v8f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v8f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -886,22 +868,21 @@ define void @vst4_v8f32(<8 x float> *%src, <32 x float> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x float>, <8 x float>* %src, i32 0
-  %l1 = load <8 x float>, <8 x float>* %s1, align 4
-  %s2 = getelementptr <8 x float>, <8 x float>* %src, i32 1
-  %l2 = load <8 x float>, <8 x float>* %s2, align 4
-  %s3 = getelementptr <8 x float>, <8 x float>* %src, i32 2
-  %l3 = load <8 x float>, <8 x float>* %s3, align 4
-  %s4 = getelementptr <8 x float>, <8 x float>* %src, i32 3
-  %l4 = load <8 x float>, <8 x float>* %s4, align 4
+  %l1 = load <8 x float>, ptr %src, align 4
+  %s2 = getelementptr <8 x float>, ptr %src, i32 1
+  %l2 = load <8 x float>, ptr %s2, align 4
+  %s3 = getelementptr <8 x float>, ptr %src, i32 2
+  %l3 = load <8 x float>, ptr %s3, align 4
+  %s4 = getelementptr <8 x float>, ptr %src, i32 3
+  %l4 = load <8 x float>, ptr %s4, align 4
   %t1 = shufflevector <8 x float> %l1, <8 x float> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x float> %l3, <8 x float> %l4, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %s = shufflevector <16 x float> %t1, <16 x float> %t2, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
-  store <32 x float> %s, <32 x float> *%dst, align 4
+  store <32 x float> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst4_v16f32(<16 x float> *%src, <64 x float> *%dst) {
+define void @vst4_v16f32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v16f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5}
@@ -965,22 +946,21 @@ define void @vst4_v16f32(<16 x float> *%src, <64 x float> *%dst) {
 ; CHECK-NEXT:    pop {r4, r5}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x float>, <16 x float>* %src, i32 0
-  %l1 = load <16 x float>, <16 x float>* %s1, align 4
-  %s2 = getelementptr <16 x float>, <16 x float>* %src, i32 1
-  %l2 = load <16 x float>, <16 x float>* %s2, align 4
-  %s3 = getelementptr <16 x float>, <16 x float>* %src, i32 2
-  %l3 = load <16 x float>, <16 x float>* %s3, align 4
-  %s4 = getelementptr <16 x float>, <16 x float>* %src, i32 3
-  %l4 = load <16 x float>, <16 x float>* %s4, align 4
+  %l1 = load <16 x float>, ptr %src, align 4
+  %s2 = getelementptr <16 x float>, ptr %src, i32 1
+  %l2 = load <16 x float>, ptr %s2, align 4
+  %s3 = getelementptr <16 x float>, ptr %src, i32 2
+  %l3 = load <16 x float>, ptr %s3, align 4
+  %s4 = getelementptr <16 x float>, ptr %src, i32 3
+  %l4 = load <16 x float>, ptr %s4, align 4
   %t1 = shufflevector <16 x float> %l1, <16 x float> %l2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %t2 = shufflevector <16 x float> %l3, <16 x float> %l4, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %s = shufflevector <32 x float> %t1, <32 x float> %t2, <64 x i32> <i32 0, i32 16, i32 32, i32 48, i32 1, i32 17, i32 33, i32 49, i32 2, i32 18, i32 34, i32 50, i32 3, i32 19, i32 35, i32 51, i32 4, i32 20, i32 36, i32 52, i32 5, i32 21, i32 37, i32 53, i32 6, i32 22, i32 38, i32 54, i32 7, i32 23, i32 39, i32 55, i32 8, i32 24, i32 40, i32 56, i32 9, i32 25, i32 41, i32 57, i32 10, i32 26, i32 42, i32 58, i32 11, i32 27, i32 43, i32 59, i32 12, i32 28, i32 44, i32 60, i32 13, i32 29, i32 45, i32 61, i32 14, i32 30, i32 46, i32 62, i32 15, i32 31, i32 47, i32 63>
-  store <64 x float> %s, <64 x float> *%dst, align 4
+  store <64 x float> %s, ptr %dst, align 4
   ret void
 }
 
-define void @vst4_v4f32_align1(<4 x float> *%src, <16 x float> *%dst) {
+define void @vst4_v4f32_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v4f32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
@@ -1011,24 +991,23 @@ define void @vst4_v4f32_align1(<4 x float> *%src, <16 x float> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x float>, <4 x float>* %src, i32 0
-  %l1 = load <4 x float>, <4 x float>* %s1, align 4
-  %s2 = getelementptr <4 x float>, <4 x float>* %src, i32 1
-  %l2 = load <4 x float>, <4 x float>* %s2, align 4
-  %s3 = getelementptr <4 x float>, <4 x float>* %src, i32 2
-  %l3 = load <4 x float>, <4 x float>* %s3, align 4
-  %s4 = getelementptr <4 x float>, <4 x float>* %src, i32 3
-  %l4 = load <4 x float>, <4 x float>* %s4, align 4
+  %l1 = load <4 x float>, ptr %src, align 4
+  %s2 = getelementptr <4 x float>, ptr %src, i32 1
+  %l2 = load <4 x float>, ptr %s2, align 4
+  %s3 = getelementptr <4 x float>, ptr %src, i32 2
+  %l3 = load <4 x float>, ptr %s3, align 4
+  %s4 = getelementptr <4 x float>, ptr %src, i32 3
+  %l4 = load <4 x float>, ptr %s4, align 4
   %t1 = shufflevector <4 x float> %l1, <4 x float> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x float> %l3, <4 x float> %l4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s = shufflevector <8 x float> %t1, <8 x float> %t2, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
-  store <16 x float> %s, <16 x float> *%dst, align 1
+  store <16 x float> %s, ptr %dst, align 1
   ret void
 }
 
 ; f16
 
-define void @vst4_v2f16(<2 x half> *%src, <8 x half> *%dst) {
+define void @vst4_v2f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v2f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldm.w r0, {r2, r3, r12}
@@ -1049,22 +1028,21 @@ define void @vst4_v2f16(<2 x half> *%src, <8 x half> *%dst) {
 ; CHECK-NEXT:    vstrh.16 q0, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x half>, <2 x half>* %src, i32 0
-  %l1 = load <2 x half>, <2 x half>* %s1, align 4
-  %s2 = getelementptr <2 x half>, <2 x half>* %src, i32 1
-  %l2 = load <2 x half>, <2 x half>* %s2, align 4
-  %s3 = getelementptr <2 x half>, <2 x half>* %src, i32 2
-  %l3 = load <2 x half>, <2 x half>* %s3, align 4
-  %s4 = getelementptr <2 x half>, <2 x half>* %src, i32 3
-  %l4 = load <2 x half>, <2 x half>* %s4, align 4
+  %l1 = load <2 x half>, ptr %src, align 4
+  %s2 = getelementptr <2 x half>, ptr %src, i32 1
+  %l2 = load <2 x half>, ptr %s2, align 4
+  %s3 = getelementptr <2 x half>, ptr %src, i32 2
+  %l3 = load <2 x half>, ptr %s3, align 4
+  %s4 = getelementptr <2 x half>, ptr %src, i32 3
+  %l4 = load <2 x half>, ptr %s4, align 4
   %t1 = shufflevector <2 x half> %l1, <2 x half> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x half> %l3, <2 x half> %l4, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %s = shufflevector <4 x half> %t1, <4 x half> %t2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
-  store <8 x half> %s, <8 x half> *%dst, align 2
+  store <8 x half> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst4_v4f16(<4 x half> *%src, <16 x half> *%dst) {
+define void @vst4_v4f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v4f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r6, lr}
@@ -1103,22 +1081,21 @@ define void @vst4_v4f16(<4 x half> *%src, <16 x half> *%dst) {
 ; CHECK-NEXT:    vstrh.16 q1, [r1]
 ; CHECK-NEXT:    pop {r4, r5, r6, pc}
 entry:
-  %s1 = getelementptr <4 x half>, <4 x half>* %src, i32 0
-  %l1 = load <4 x half>, <4 x half>* %s1, align 4
-  %s2 = getelementptr <4 x half>, <4 x half>* %src, i32 1
-  %l2 = load <4 x half>, <4 x half>* %s2, align 4
-  %s3 = getelementptr <4 x half>, <4 x half>* %src, i32 2
-  %l3 = load <4 x half>, <4 x half>* %s3, align 4
-  %s4 = getelementptr <4 x half>, <4 x half>* %src, i32 3
-  %l4 = load <4 x half>, <4 x half>* %s4, align 4
+  %l1 = load <4 x half>, ptr %src, align 4
+  %s2 = getelementptr <4 x half>, ptr %src, i32 1
+  %l2 = load <4 x half>, ptr %s2, align 4
+  %s3 = getelementptr <4 x half>, ptr %src, i32 2
+  %l3 = load <4 x half>, ptr %s3, align 4
+  %s4 = getelementptr <4 x half>, ptr %src, i32 3
+  %l4 = load <4 x half>, ptr %s4, align 4
   %t1 = shufflevector <4 x half> %l1, <4 x half> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x half> %l3, <4 x half> %l4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s = shufflevector <8 x half> %t1, <8 x half> %t2, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
-  store <16 x half> %s, <16 x half> *%dst, align 2
+  store <16 x half> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst4_v8f16(<8 x half> *%src, <32 x half> *%dst) {
+define void @vst4_v8f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v8f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
@@ -1131,22 +1108,21 @@ define void @vst4_v8f16(<8 x half> *%src, <32 x half> *%dst) {
 ; CHECK-NEXT:    vst43.16 {q0, q1, q2, q3}, [r1]
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x half>, <8 x half>* %src, i32 0
-  %l1 = load <8 x half>, <8 x half>* %s1, align 4
-  %s2 = getelementptr <8 x half>, <8 x half>* %src, i32 1
-  %l2 = load <8 x half>, <8 x half>* %s2, align 4
-  %s3 = getelementptr <8 x half>, <8 x half>* %src, i32 2
-  %l3 = load <8 x half>, <8 x half>* %s3, align 4
-  %s4 = getelementptr <8 x half>, <8 x half>* %src, i32 3
-  %l4 = load <8 x half>, <8 x half>* %s4, align 4
+  %l1 = load <8 x half>, ptr %src, align 4
+  %s2 = getelementptr <8 x half>, ptr %src, i32 1
+  %l2 = load <8 x half>, ptr %s2, align 4
+  %s3 = getelementptr <8 x half>, ptr %src, i32 2
+  %l3 = load <8 x half>, ptr %s3, align 4
+  %s4 = getelementptr <8 x half>, ptr %src, i32 3
+  %l4 = load <8 x half>, ptr %s4, align 4
   %t1 = shufflevector <8 x half> %l1, <8 x half> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x half> %l3, <8 x half> %l4, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %s = shufflevector <16 x half> %t1, <16 x half> %t2, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
-  store <32 x half> %s, <32 x half> *%dst, align 2
+  store <32 x half> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst4_v16f16(<16 x half> *%src, <64 x half> *%dst) {
+define void @vst4_v16f16(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v16f16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -1170,22 +1146,21 @@ define void @vst4_v16f16(<16 x half> *%src, <64 x half> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <16 x half>, <16 x half>* %src, i32 0
-  %l1 = load <16 x half>, <16 x half>* %s1, align 4
-  %s2 = getelementptr <16 x half>, <16 x half>* %src, i32 1
-  %l2 = load <16 x half>, <16 x half>* %s2, align 4
-  %s3 = getelementptr <16 x half>, <16 x half>* %src, i32 2
-  %l3 = load <16 x half>, <16 x half>* %s3, align 4
-  %s4 = getelementptr <16 x half>, <16 x half>* %src, i32 3
-  %l4 = load <16 x half>, <16 x half>* %s4, align 4
+  %l1 = load <16 x half>, ptr %src, align 4
+  %s2 = getelementptr <16 x half>, ptr %src, i32 1
+  %l2 = load <16 x half>, ptr %s2, align 4
+  %s3 = getelementptr <16 x half>, ptr %src, i32 2
+  %l3 = load <16 x half>, ptr %s3, align 4
+  %s4 = getelementptr <16 x half>, ptr %src, i32 3
+  %l4 = load <16 x half>, ptr %s4, align 4
   %t1 = shufflevector <16 x half> %l1, <16 x half> %l2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %t2 = shufflevector <16 x half> %l3, <16 x half> %l4, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %s = shufflevector <32 x half> %t1, <32 x half> %t2, <64 x i32> <i32 0, i32 16, i32 32, i32 48, i32 1, i32 17, i32 33, i32 49, i32 2, i32 18, i32 34, i32 50, i32 3, i32 19, i32 35, i32 51, i32 4, i32 20, i32 36, i32 52, i32 5, i32 21, i32 37, i32 53, i32 6, i32 22, i32 38, i32 54, i32 7, i32 23, i32 39, i32 55, i32 8, i32 24, i32 40, i32 56, i32 9, i32 25, i32 41, i32 57, i32 10, i32 26, i32 42, i32 58, i32 11, i32 27, i32 43, i32 59, i32 12, i32 28, i32 44, i32 60, i32 13, i32 29, i32 45, i32 61, i32 14, i32 30, i32 46, i32 62, i32 15, i32 31, i32 47, i32 63>
-  store <64 x half> %s, <64 x half> *%dst, align 2
+  store <64 x half> %s, ptr %dst, align 2
   ret void
 }
 
-define void @vst4_v8f16_align1(<8 x half> *%src, <32 x half> *%dst) {
+define void @vst4_v8f16_align1(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v8f16_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d9, d10, d11, d12, d13}
@@ -1245,24 +1220,23 @@ define void @vst4_v8f16_align1(<8 x half> *%src, <32 x half> *%dst) {
 ; CHECK-NEXT:    vpop {d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <8 x half>, <8 x half>* %src, i32 0
-  %l1 = load <8 x half>, <8 x half>* %s1, align 4
-  %s2 = getelementptr <8 x half>, <8 x half>* %src, i32 1
-  %l2 = load <8 x half>, <8 x half>* %s2, align 4
-  %s3 = getelementptr <8 x half>, <8 x half>* %src, i32 2
-  %l3 = load <8 x half>, <8 x half>* %s3, align 4
-  %s4 = getelementptr <8 x half>, <8 x half>* %src, i32 3
-  %l4 = load <8 x half>, <8 x half>* %s4, align 4
+  %l1 = load <8 x half>, ptr %src, align 4
+  %s2 = getelementptr <8 x half>, ptr %src, i32 1
+  %l2 = load <8 x half>, ptr %s2, align 4
+  %s3 = getelementptr <8 x half>, ptr %src, i32 2
+  %l3 = load <8 x half>, ptr %s3, align 4
+  %s4 = getelementptr <8 x half>, ptr %src, i32 3
+  %l4 = load <8 x half>, ptr %s4, align 4
   %t1 = shufflevector <8 x half> %l1, <8 x half> %l2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %t2 = shufflevector <8 x half> %l3, <8 x half> %l4, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %s = shufflevector <16 x half> %t1, <16 x half> %t2, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
-  store <32 x half> %s, <32 x half> *%dst, align 1
+  store <32 x half> %s, ptr %dst, align 1
   ret void
 }
 
 ; f64
 
-define void @vst4_v2f64(<2 x double> *%src, <8 x double> *%dst) {
+define void @vst4_v2f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v2f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
@@ -1284,22 +1258,21 @@ define void @vst4_v2f64(<2 x double> *%src, <8 x double> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <2 x double>, <2 x double>* %src, i32 0
-  %l1 = load <2 x double>, <2 x double>* %s1, align 4
-  %s2 = getelementptr <2 x double>, <2 x double>* %src, i32 1
-  %l2 = load <2 x double>, <2 x double>* %s2, align 4
-  %s3 = getelementptr <2 x double>, <2 x double>* %src, i32 2
-  %l3 = load <2 x double>, <2 x double>* %s3, align 4
-  %s4 = getelementptr <2 x double>, <2 x double>* %src, i32 3
-  %l4 = load <2 x double>, <2 x double>* %s4, align 4
+  %l1 = load <2 x double>, ptr %src, align 4
+  %s2 = getelementptr <2 x double>, ptr %src, i32 1
+  %l2 = load <2 x double>, ptr %s2, align 4
+  %s3 = getelementptr <2 x double>, ptr %src, i32 2
+  %l3 = load <2 x double>, ptr %s3, align 4
+  %s4 = getelementptr <2 x double>, ptr %src, i32 3
+  %l4 = load <2 x double>, ptr %s4, align 4
   %t1 = shufflevector <2 x double> %l1, <2 x double> %l2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %t2 = shufflevector <2 x double> %l3, <2 x double> %l4, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %s = shufflevector <4 x double> %t1, <4 x double> %t2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
-  store <8 x double> %s, <8 x double> *%dst, align 8
+  store <8 x double> %s, ptr %dst, align 8
   ret void
 }
 
-define void @vst4_v4f64(<4 x double> *%src, <16 x double> *%dst) {
+define void @vst4_v4f64(ptr %src, ptr %dst) {
 ; CHECK-LABEL: vst4_v4f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -1346,17 +1319,16 @@ define void @vst4_v4f64(<4 x double> *%src, <16 x double> *%dst) {
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
-  %s1 = getelementptr <4 x double>, <4 x double>* %src, i32 0
-  %l1 = load <4 x double>, <4 x double>* %s1, align 4
-  %s2 = getelementptr <4 x double>, <4 x double>* %src, i32 1
-  %l2 = load <4 x double>, <4 x double>* %s2, align 4
-  %s3 = getelementptr <4 x double>, <4 x double>* %src, i32 2
-  %l3 = load <4 x double>, <4 x double>* %s3, align 4
-  %s4 = getelementptr <4 x double>, <4 x double>* %src, i32 3
-  %l4 = load <4 x double>, <4 x double>* %s4, align 4
+  %l1 = load <4 x double>, ptr %src, align 4
+  %s2 = getelementptr <4 x double>, ptr %src, i32 1
+  %l2 = load <4 x double>, ptr %s2, align 4
+  %s3 = getelementptr <4 x double>, ptr %src, i32 2
+  %l3 = load <4 x double>, ptr %s3, align 4
+  %s4 = getelementptr <4 x double>, ptr %src, i32 3
+  %l4 = load <4 x double>, ptr %s4, align 4
   %t1 = shufflevector <4 x double> %l1, <4 x double> %l2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %t2 = shufflevector <4 x double> %l3, <4 x double> %l4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s = shufflevector <8 x double> %t1, <8 x double> %t2, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
-  store <16 x double> %s, <16 x double> *%dst, align 8
+  store <16 x double> %s, ptr %dst, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/Thumb2/mve-widen-narrow.ll b/llvm/test/CodeGen/Thumb2/mve-widen-narrow.ll
index 1eb9e8bd7c8d9..bea76276e2858 100644
--- a/llvm/test/CodeGen/Thumb2/mve-widen-narrow.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-widen-narrow.ll
@@ -2,47 +2,47 @@
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LE
 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-BE
 
-define void @foo_int8_int32(<4 x i8>* %dest, <4 x i32>* readonly %src, i32 %n) {
+define void @foo_int8_int32(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int8_int32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
 ; CHECK-NEXT:    vstrb.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <4 x i32>, <4 x i32>* %src, align 4
+  %wide.load = load <4 x i32>, ptr %src, align 4
   %0 = trunc <4 x i32> %wide.load to <4 x i8>
-  store <4 x i8> %0, <4 x i8>* %dest, align 1
+  store <4 x i8> %0, ptr %dest, align 1
   ret void
 }
 
-define void @foo_int16_int32(<4 x i16>* %dest, <4 x i32>* readonly %src, i32 %n) {
+define void @foo_int16_int32(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int16_int32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
 ; CHECK-NEXT:    vstrh.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <4 x i32>, <4 x i32>* %src, align 4
+  %wide.load = load <4 x i32>, ptr %src, align 4
   %0 = trunc <4 x i32> %wide.load to <4 x i16>
-  store <4 x i16> %0, <4 x i16>* %dest, align 2
+  store <4 x i16> %0, ptr %dest, align 2
   ret void
 }
 
-define void @foo_int8_int16(<8 x i8>* %dest, <8 x i16>* readonly %src, i32 %n) {
+define void @foo_int8_int16(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int8_int16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
 ; CHECK-NEXT:    vstrb.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <8 x i16>, <8 x i16>* %src, align 2
+  %wide.load = load <8 x i16>, ptr %src, align 2
   %0 = trunc <8 x i16> %wide.load to <8 x i8>
-  store <8 x i8> %0, <8 x i8>* %dest, align 1
+  store <8 x i8> %0, ptr %dest, align 1
   ret void
 }
 
 
-define void @foo_int8_int32_double(<8 x i8>* %dest, <8 x i32>* readonly %src, i32 %n) {
+define void @foo_int8_int32_double(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LE-LABEL: foo_int8_int32_double:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    vldrh.u16 q1, [r1]
@@ -83,13 +83,13 @@ define void @foo_int8_int32_double(<8 x i8>* %dest, <8 x i32>* readonly %src, i3
 ; CHECK-BE-NEXT:    vstrb.16 q0, [r0]
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %wide.load = load <8 x i32>, <8 x i32>* %src, align 2
+  %wide.load = load <8 x i32>, ptr %src, align 2
   %0 = trunc <8 x i32> %wide.load to <8 x i8>
-  store <8 x i8> %0, <8 x i8>* %dest, align 1
+  store <8 x i8> %0, ptr %dest, align 1
   ret void
 }
 
-define void @foo_int16_int32_double(<8 x i16>* %dest, <8 x i32>* readonly %src, i32 %n) {
+define void @foo_int16_int32_double(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int16_int32_double:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -98,13 +98,13 @@ define void @foo_int16_int32_double(<8 x i16>* %dest, <8 x i32>* readonly %src,
 ; CHECK-NEXT:    vstrh.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <8 x i32>, <8 x i32>* %src, align 4
+  %wide.load = load <8 x i32>, ptr %src, align 4
   %0 = trunc <8 x i32> %wide.load to <8 x i16>
-  store <8 x i16> %0, <8 x i16>* %dest, align 2
+  store <8 x i16> %0, ptr %dest, align 2
   ret void
 }
 
-define void @foo_int8_int16_double(<16 x i8>* %dest, <16 x i16>* readonly %src, i32 %n) {
+define void @foo_int8_int16_double(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int8_int16_double:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u16 q0, [r1]
@@ -113,13 +113,13 @@ define void @foo_int8_int16_double(<16 x i8>* %dest, <16 x i16>* readonly %src,
 ; CHECK-NEXT:    vstrb.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <16 x i16>, <16 x i16>* %src, align 2
+  %wide.load = load <16 x i16>, ptr %src, align 2
   %0 = trunc <16 x i16> %wide.load to <16 x i8>
-  store <16 x i8> %0, <16 x i8>* %dest, align 1
+  store <16 x i8> %0, ptr %dest, align 1
   ret void
 }
 
-define void @foo_int8_int32_quad(<16 x i8>* %dest, <16 x i32>* readonly %src, i32 %n) {
+define void @foo_int8_int32_quad(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int8_int32_quad:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrw.u32 q0, [r1]
@@ -132,53 +132,53 @@ define void @foo_int8_int32_quad(<16 x i8>* %dest, <16 x i32>* readonly %src, i3
 ; CHECK-NEXT:    vstrb.32 q2, [r0, #8]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <16 x i32>, <16 x i32>* %src, align 4
+  %wide.load = load <16 x i32>, ptr %src, align 4
   %0 = trunc <16 x i32> %wide.load to <16 x i8>
-  store <16 x i8> %0, <16 x i8>* %dest, align 1
+  store <16 x i8> %0, ptr %dest, align 1
   ret void
 }
 
 
-define void @foo_int32_int8(<4 x i32>* %dest, <4 x i8>* readonly %src, i32 %n) {
+define void @foo_int32_int8(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int32_int8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <4 x i8>, <4 x i8>* %src, align 1
+  %wide.load = load <4 x i8>, ptr %src, align 1
   %0 = sext <4 x i8> %wide.load to <4 x i32>
-  store <4 x i32> %0, <4 x i32>* %dest, align 4
+  store <4 x i32> %0, ptr %dest, align 4
   ret void
 }
 
-define void @foo_int16_int8(<8 x i16>* %dest, <8 x i8>* readonly %src, i32 %n) {
+define void @foo_int16_int8(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int16_int8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <8 x i8>, <8 x i8>* %src, align 1
+  %wide.load = load <8 x i8>, ptr %src, align 1
   %0 = sext <8 x i8> %wide.load to <8 x i16>
-  store <8 x i16> %0, <8 x i16>* %dest, align 2
+  store <8 x i16> %0, ptr %dest, align 2
   ret void
 }
 
-define void @foo_int32_int16(<4 x i32>* %dest, <4 x i16>* readonly %src, i32 %n) {
+define void @foo_int32_int16(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int32_int16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <4 x i16>, <4 x i16>* %src, align 2
+  %wide.load = load <4 x i16>, ptr %src, align 2
   %0 = sext <4 x i16> %wide.load to <4 x i32>
-  store <4 x i32> %0, <4 x i32>* %dest, align 4
+  store <4 x i32> %0, ptr %dest, align 4
   ret void
 }
 
-define void @foo_int32_int8_double(<8 x i32>* %dest, <8 x i8>* readonly %src, i32 %n) {
+define void @foo_int32_int8_double(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int32_int8_double:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r1]
@@ -187,13 +187,13 @@ define void @foo_int32_int8_double(<8 x i32>* %dest, <8 x i8>* readonly %src, i3
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <8 x i8>, <8 x i8>* %src, align 1
+  %wide.load = load <8 x i8>, ptr %src, align 1
   %0 = sext <8 x i8> %wide.load to <8 x i32>
-  store <8 x i32> %0, <8 x i32>* %dest, align 4
+  store <8 x i32> %0, ptr %dest, align 4
   ret void
 }
 
-define void @foo_int16_int8_double(<16 x i16>* %dest, <16 x i8>* readonly %src, i32 %n) {
+define void @foo_int16_int8_double(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int16_int8_double:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s16 q0, [r1]
@@ -202,13 +202,13 @@ define void @foo_int16_int8_double(<16 x i16>* %dest, <16 x i8>* readonly %src,
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
+  %wide.load = load <16 x i8>, ptr %src, align 1
   %0 = sext <16 x i8> %wide.load to <16 x i16>
-  store <16 x i16> %0, <16 x i16>* %dest, align 2
+  store <16 x i16> %0, ptr %dest, align 2
   ret void
 }
 
-define void @foo_int32_int16_double(<8 x i32>* %dest, <8 x i16>* readonly %src, i32 %n) {
+define void @foo_int32_int16_double(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int32_int16_double:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r1]
@@ -217,13 +217,13 @@ define void @foo_int32_int16_double(<8 x i32>* %dest, <8 x i16>* readonly %src,
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <8 x i16>, <8 x i16>* %src, align 2
+  %wide.load = load <8 x i16>, ptr %src, align 2
   %0 = sext <8 x i16> %wide.load to <8 x i32>
-  store <8 x i32> %0, <8 x i32>* %dest, align 4
+  store <8 x i32> %0, ptr %dest, align 4
   ret void
 }
 
-define void @foo_int32_int8_quad(<16 x i32>* %dest, <16 x i8>* readonly %src, i32 %n) {
+define void @foo_int32_int8_quad(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int32_int8_quad:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r1]
@@ -236,54 +236,54 @@ define void @foo_int32_int8_quad(<16 x i32>* %dest, <16 x i8>* readonly %src, i3
 ; CHECK-NEXT:    vstrw.32 q2, [r0, #32]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
+  %wide.load = load <16 x i8>, ptr %src, align 1
   %0 = sext <16 x i8> %wide.load to <16 x i32>
-  store <16 x i32> %0, <16 x i32>* %dest, align 4
+  store <16 x i32> %0, ptr %dest, align 4
   ret void
 }
 
 
-define void @foo_uint32_uint8(<4 x i32>* %dest, <4 x i8>* readonly %src, i32 %n) {
+define void @foo_uint32_uint8(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_uint32_uint8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <4 x i8>, <4 x i8>* %src, align 1
+  %wide.load = load <4 x i8>, ptr %src, align 1
   %0 = zext <4 x i8> %wide.load to <4 x i32>
-  store <4 x i32> %0, <4 x i32>* %dest, align 4
+  store <4 x i32> %0, ptr %dest, align 4
   ret void
 }
 
-define void @foo_uint16_uint8(<8 x i16>* %dest, <8 x i8>* readonly %src, i32 %n) {
+define void @foo_uint16_uint8(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_uint16_uint8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <8 x i8>, <8 x i8>* %src, align 1
+  %wide.load = load <8 x i8>, ptr %src, align 1
   %0 = zext <8 x i8> %wide.load to <8 x i16>
-  store <8 x i16> %0, <8 x i16>* %dest, align 2
+  store <8 x i16> %0, ptr %dest, align 2
   ret void
 }
 
-define void @foo_uint32_uint16(<4 x i32>* %dest, <4 x i16>* readonly %src, i32 %n) {
+define void @foo_uint32_uint16(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_uint32_uint16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <4 x i16>, <4 x i16>* %src, align 2
+  %wide.load = load <4 x i16>, ptr %src, align 2
   %0 = zext <4 x i16> %wide.load to <4 x i32>
-  store <4 x i32> %0, <4 x i32>* %dest, align 4
+  store <4 x i32> %0, ptr %dest, align 4
   ret void
 }
 
 
-define void @foo_uint32_uint8_double(<8 x i32>* %dest, <8 x i8>* readonly %src, i32 %n) {
+define void @foo_uint32_uint8_double(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_uint32_uint8_double:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
@@ -292,13 +292,13 @@ define void @foo_uint32_uint8_double(<8 x i32>* %dest, <8 x i8>* readonly %src,
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <8 x i8>, <8 x i8>* %src, align 1
+  %wide.load = load <8 x i8>, ptr %src, align 1
   %0 = zext <8 x i8> %wide.load to <8 x i32>
-  store <8 x i32> %0, <8 x i32>* %dest, align 4
+  store <8 x i32> %0, ptr %dest, align 4
   ret void
 }
 
-define void @foo_uint16_uint8_double(<16 x i16>* %dest, <16 x i8>* readonly %src, i32 %n) {
+define void @foo_uint16_uint8_double(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_uint16_uint8_double:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u16 q0, [r1]
@@ -307,13 +307,13 @@ define void @foo_uint16_uint8_double(<16 x i16>* %dest, <16 x i8>* readonly %src
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
+  %wide.load = load <16 x i8>, ptr %src, align 1
   %0 = zext <16 x i8> %wide.load to <16 x i16>
-  store <16 x i16> %0, <16 x i16>* %dest, align 2
+  store <16 x i16> %0, ptr %dest, align 2
   ret void
 }
 
-define void @foo_uint32_uint16_double(<8 x i32>* %dest, <8 x i16>* readonly %src, i32 %n) {
+define void @foo_uint32_uint16_double(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_uint32_uint16_double:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.u32 q0, [r1]
@@ -322,13 +322,13 @@ define void @foo_uint32_uint16_double(<8 x i32>* %dest, <8 x i16>* readonly %src
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <8 x i16>, <8 x i16>* %src, align 2
+  %wide.load = load <8 x i16>, ptr %src, align 2
   %0 = zext <8 x i16> %wide.load to <8 x i32>
-  store <8 x i32> %0, <8 x i32>* %dest, align 4
+  store <8 x i32> %0, ptr %dest, align 4
   ret void
 }
 
-define void @foo_uint32_uint8_quad(<16 x i32>* %dest, <16 x i8>* readonly %src, i32 %n) {
+define void @foo_uint32_uint8_quad(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_uint32_uint8_quad:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
@@ -341,14 +341,14 @@ define void @foo_uint32_uint8_quad(<16 x i32>* %dest, <16 x i8>* readonly %src,
 ; CHECK-NEXT:    vstrw.32 q2, [r0, #32]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
+  %wide.load = load <16 x i8>, ptr %src, align 1
   %0 = zext <16 x i8> %wide.load to <16 x i32>
-  store <16 x i32> %0, <16 x i32>* %dest, align 4
+  store <16 x i32> %0, ptr %dest, align 4
   ret void
 }
 
 
-define void @foo_int32_int8_both(<16 x i32>* %dest, <16 x i8>* readonly %src, i32 %n) {
+define void @foo_int32_int8_both(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LE-LABEL: foo_int32_int8_both:
 ; CHECK-LE:       @ %bb.0: @ %entry
 ; CHECK-LE-NEXT:    .pad #32
@@ -391,14 +391,14 @@ define void @foo_int32_int8_both(<16 x i32>* %dest, <16 x i8>* readonly %src, i3
 ; CHECK-BE-NEXT:    add sp, #32
 ; CHECK-BE-NEXT:    bx lr
 entry:
-  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
+  %wide.load = load <16 x i8>, ptr %src, align 1
   %0 = sext <16 x i8> %wide.load to <16 x i16>
   %1 = zext <16 x i16> %0 to <16 x i32>
-  store <16 x i32> %1, <16 x i32>* %dest, align 4
+  store <16 x i32> %1, ptr %dest, align 4
   ret void
 }
 
-define <8 x i16>* @foo_uint32_uint16_double_offset(<8 x i32>* %dest, <8 x i16>* readonly %src, i32 %n) {
+define ptr @foo_uint32_uint16_double_offset(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_uint32_uint16_double_offset:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r1, #16]!
@@ -408,14 +408,14 @@ define <8 x i16>* @foo_uint32_uint16_double_offset(<8 x i32>* %dest, <8 x i16>*
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds <8 x i16>, <8 x i16>* %src, i32 1
-  %wide.load = load <8 x i16>, <8 x i16>* %z, align 2
+  %z = getelementptr inbounds <8 x i16>, ptr %src, i32 1
+  %wide.load = load <8 x i16>, ptr %z, align 2
   %0 = sext <8 x i16> %wide.load to <8 x i32>
-  store <8 x i32> %0, <8 x i32>* %dest, align 4
-  ret <8 x i16>* %z
+  store <8 x i32> %0, ptr %dest, align 4
+  ret ptr %z
 }
 
-define <16 x i16>* @foo_uint32_uint16_quad_offset(<16 x i32>* %dest, <16 x i16>* readonly %src, i32 %n) {
+define ptr @foo_uint32_uint16_quad_offset(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_uint32_uint16_quad_offset:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrh.s32 q0, [r1, #32]!
@@ -429,15 +429,15 @@ define <16 x i16>* @foo_uint32_uint16_quad_offset(<16 x i32>* %dest, <16 x i16>*
 ; CHECK-NEXT:    mov r0, r1
 ; CHECK-NEXT:    bx lr
 entry:
-  %z = getelementptr inbounds <16 x i16>, <16 x i16>* %src, i32 1
-  %wide.load = load <16 x i16>, <16 x i16>* %z, align 2
+  %z = getelementptr inbounds <16 x i16>, ptr %src, i32 1
+  %wide.load = load <16 x i16>, ptr %z, align 2
   %0 = sext <16 x i16> %wide.load to <16 x i32>
-  store <16 x i32> %0, <16 x i32>* %dest, align 4
-  ret <16 x i16>* %z
+  store <16 x i32> %0, ptr %dest, align 4
+  ret ptr %z
 }
 
 
-define void @foo_int16_int32_align1(<4 x i16>* %dest, <4 x i32>* readonly %src, i32 %n) {
+define void @foo_int16_int32_align1(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int16_int32_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #8
@@ -451,13 +451,13 @@ define void @foo_int16_int32_align1(<4 x i16>* %dest, <4 x i32>* readonly %src,
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <4 x i32>, <4 x i32>* %src, align 4
+  %wide.load = load <4 x i32>, ptr %src, align 4
   %0 = trunc <4 x i32> %wide.load to <4 x i16>
-  store <4 x i16> %0, <4 x i16>* %dest, align 1
+  store <4 x i16> %0, ptr %dest, align 1
   ret void
 }
 
-define void @foo_int32_int16_align1(<4 x i32>* %dest, <4 x i16>* readonly %src, i32 %n) {
+define void @foo_int32_int16_align1(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int32_int16_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #8
@@ -471,13 +471,13 @@ define void @foo_int32_int16_align1(<4 x i32>* %dest, <4 x i16>* readonly %src,
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <4 x i16>, <4 x i16>* %src, align 1
+  %wide.load = load <4 x i16>, ptr %src, align 1
   %0 = sext <4 x i16> %wide.load to <4 x i32>
-  store <4 x i32> %0, <4 x i32>* %dest, align 4
+  store <4 x i32> %0, ptr %dest, align 4
   ret void
 }
 
-define void @foo_uint32_uint16_align1(<4 x i32>* %dest, <4 x i16>* readonly %src, i32 %n) {
+define void @foo_uint32_uint16_align1(ptr %dest, ptr readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_uint32_uint16_align1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #8
@@ -491,8 +491,8 @@ define void @foo_uint32_uint16_align1(<4 x i32>* %dest, <4 x i16>* readonly %src
 ; CHECK-NEXT:    add sp, #8
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <4 x i16>, <4 x i16>* %src, align 1
+  %wide.load = load <4 x i16>, ptr %src, align 1
   %0 = zext <4 x i16> %wide.load to <4 x i32>
-  store <4 x i32> %0, <4 x i32>* %dest, align 4
+  store <4 x i32> %0, ptr %dest, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/mve-zext-masked-load.ll b/llvm/test/CodeGen/Thumb2/mve-zext-masked-load.ll
index 65214ca40d82f..0d7354fcbbf2c 100644
--- a/llvm/test/CodeGen/Thumb2/mve-zext-masked-load.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-zext-masked-load.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs -o - %s | FileCheck %s
 
-define arm_aapcs_vfpcc <4 x float> @foo_v4i16(<4 x i16>* nocapture readonly %pSrc, <4 x i16> %a) {
+define arm_aapcs_vfpcc <4 x float> @foo_v4i16(ptr nocapture readonly %pSrc, <4 x i16> %a) {
 ; CHECK-LABEL: foo_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlb.s16 q0, q0
@@ -11,12 +11,12 @@ define arm_aapcs_vfpcc <4 x float> @foo_v4i16(<4 x i16>* nocapture readonly %pSr
 ; CHECK-NEXT:    bx lr
 entry:
   %active.lane.mask = icmp slt <4 x i16> %a, zeroinitializer
-  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %pSrc, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %pSrc, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
   %0 = uitofp <4 x i16> %wide.masked.load to <4 x float>
   ret <4 x float> %0
 }
 
-define arm_aapcs_vfpcc <8 x half> @foo_v8i8(<8 x i8>* nocapture readonly %pSrc, i32 %blockSize, <8 x i8> %a) {
+define arm_aapcs_vfpcc <8 x half> @foo_v8i8(ptr nocapture readonly %pSrc, i32 %blockSize, <8 x i8> %a) {
 ; CHECK-LABEL: foo_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlb.s8 q0, q0
@@ -26,12 +26,12 @@ define arm_aapcs_vfpcc <8 x half> @foo_v8i8(<8 x i8>* nocapture readonly %pSrc,
 ; CHECK-NEXT:    bx lr
 entry:
   %active.lane.mask = icmp slt <8 x i8> %a, zeroinitializer
-  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %pSrc, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
+  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %pSrc, i32 1, <8 x i1> %active.lane.mask, <8 x i8> undef)
   %0 = uitofp <8 x i8> %wide.masked.load to <8 x half>
   ret <8 x half> %0
 }
 
-define arm_aapcs_vfpcc <4 x float> @foo_v4i8(<4 x i8>* nocapture readonly %pSrc, i32 %blockSize, <4 x i8> %a) {
+define arm_aapcs_vfpcc <4 x float> @foo_v4i8(ptr nocapture readonly %pSrc, i32 %blockSize, <4 x i8> %a) {
 ; CHECK-LABEL: foo_v4i8:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlb.s8 q0, q0
@@ -42,12 +42,12 @@ define arm_aapcs_vfpcc <4 x float> @foo_v4i8(<4 x i8>* nocapture readonly %pSrc,
 ; CHECK-NEXT:    bx lr
 entry:
   %active.lane.mask = icmp slt <4 x i8> %a, zeroinitializer
-  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %pSrc, i32 1, <4 x i1> %active.lane.mask, <4 x i8> undef)
+  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %pSrc, i32 1, <4 x i1> %active.lane.mask, <4 x i8> undef)
   %0 = uitofp <4 x i8> %wide.masked.load to <4 x float>
   ret <4 x float> %0
 }
 
-define arm_aapcs_vfpcc <4 x double> @foo_v4i32(<4 x i32>* nocapture readonly %pSrc, i32 %blockSize, <4 x i32> %a) {
+define arm_aapcs_vfpcc <4 x double> @foo_v4i32(ptr nocapture readonly %pSrc, i32 %blockSize, <4 x i32> %a) {
 ; CHECK-LABEL: foo_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
@@ -87,15 +87,15 @@ define arm_aapcs_vfpcc <4 x double> @foo_v4i32(<4 x i32>* nocapture readonly %pS
 ; CHECK-NEXT:    pop {r4, r5, r7, pc}
 entry:
   %active.lane.mask = icmp slt <4 x i32> %a, zeroinitializer
-  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %pSrc, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %pSrc, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
   %0 = uitofp <4 x i32> %wide.masked.load to <4 x double>
   ret <4 x double> %0
 }
 
-declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32 immarg, <4 x i1>, <4 x i16>)
 
-declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
+declare <8 x i8> @llvm.masked.load.v8i8.p0(ptr, i32 immarg, <8 x i1>, <8 x i8>)
 
-declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>)
+declare <4 x i8> @llvm.masked.load.v4i8.p0(ptr, i32 immarg, <4 x i1>, <4 x i8>)
 
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)

diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-indirect-tail-call.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-indirect-tail-call.ll
index 98a5eb5b98aae..9fbc52e0b39f7 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-indirect-tail-call.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-indirect-tail-call.ll
@@ -3,12 +3,12 @@
 target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
 target triple = "thumbv8.1m.main-arm-unknown-eabi"
 
-@p = hidden local_unnamed_addr global i32 (i32, i32, i32, i32)* null, align 4
+@p = hidden local_unnamed_addr global ptr null, align 4
 
 define hidden i32 @f(i32 %a, i32 %b, i32 %c, i32 %d) local_unnamed_addr #0 {
 entry:
   %call = tail call i32 @g(i32 %a) #0
-  %0 = load i32 (i32, i32, i32, i32)*, i32 (i32, i32, i32, i32)** @p, align 4
+  %0 = load ptr, ptr @p, align 4
   %call1 = tail call i32 %0(i32 %call, i32 %b, i32 %c, i32 %d) #0
   ret i32 %call1
 }

diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-1.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-1.ll
index ce22d4eb632bd..0b6d318348e3b 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-1.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-1.ll
@@ -31,19 +31,19 @@ target triple = "armv7m-none-nacl-android"
 
 define hidden i32 @x() local_unnamed_addr #0 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
-  %5 = load volatile i32, i32* @f, align 4
-  %6 = load volatile i32, i32* @g, align 4
+  %4 = load volatile i32, ptr @e, align 4
+  %5 = load volatile i32, ptr @f, align 4
+  %6 = load volatile i32, ptr @g, align 4
   %div3 = sdiv i32 %5, %6
-  %7 = load volatile i32, i32* @h, align 4
-  %8 = load volatile i32, i32* @i, align 4
+  %7 = load volatile i32, ptr @h, align 4
+  %8 = load volatile i32, ptr @i, align 4
   %add2 = add i32 %div, 1
   %add4 = add i32 %add2, %4
   %add5 = add i32 %add4, %div3
@@ -72,19 +72,19 @@ entry:
 
 define hidden i32 @y() local_unnamed_addr #0 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
-  %5 = load volatile i32, i32* @f, align 4
-  %6 = load volatile i32, i32* @g, align 4
+  %4 = load volatile i32, ptr @e, align 4
+  %5 = load volatile i32, ptr @f, align 4
+  %6 = load volatile i32, ptr @g, align 4
   %div3 = sdiv i32 %5, %6
-  %7 = load volatile i32, i32* @h, align 4
-  %8 = load volatile i32, i32* @i, align 4
+  %7 = load volatile i32, ptr @h, align 4
+  %8 = load volatile i32, ptr @i, align 4
   %add2 = add i32 %div, 2
   %add4 = add i32 %add2, %4
   %add5 = add i32 %add4, %div3

diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-2.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-2.ll
index b2c631ee3cbbd..087e049c6063a 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-2.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-2.ll
@@ -24,12 +24,12 @@ target triple = "thumbv7m-arm-none-eabi"
 
 define hidden i32 @x() local_unnamed_addr #0 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
-  %4 = load volatile i32, i32* @e, align 4
-  %5 = load volatile i32, i32* @f, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
+  %4 = load volatile i32, ptr @e, align 4
+  %5 = load volatile i32, ptr @f, align 4
   %add = add i32 %0, 1
   %add1 = add i32 %add, %1
   %add2 = add i32 %add1, %2
@@ -50,12 +50,12 @@ entry:
 
 define hidden i32 @y() local_unnamed_addr #0 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
-  %4 = load volatile i32, i32* @e, align 4
-  %5 = load volatile i32, i32* @f, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
+  %4 = load volatile i32, ptr @e, align 4
+  %5 = load volatile i32, ptr @f, align 4
   %add = add i32 %0, 2
   %add1 = add i32 %add, %1
   %add2 = add i32 %add1, %2

diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll
index b1768863b764c..ffc29534df700 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll
@@ -24,7 +24,7 @@ target triple = "thumbv7m-arm-none-eabi"
 ;   return 1 + a * (a + b) / (c + d);
 ; }
 
-@_ZTIi = external dso_local constant i8*
+@_ZTIi = external dso_local constant ptr
 
 define hidden i32 @_Z1hii(i32 %a, i32 %b) local_unnamed_addr #0 {
 entry:
@@ -32,10 +32,9 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %exception = tail call i8* @__cxa_allocate_exception(i32 4) #1
-  %0 = bitcast i8* %exception to i32*
-  store i32 1, i32* %0, align 8
-  tail call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #2
+  %exception = tail call ptr @__cxa_allocate_exception(i32 4) #1
+  store i32 1, ptr %exception, align 8
+  tail call void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null) #2
   unreachable
 
 if.end:                                           ; preds = %entry
@@ -63,9 +62,9 @@ if.end:                                           ; preds = %entry
 ; CHECK: aut
 ; CHECK:     .cfi_endproc
 
-declare dso_local i8* @__cxa_allocate_exception(i32) local_unnamed_addr
+declare dso_local ptr @__cxa_allocate_exception(i32) local_unnamed_addr
 
-declare dso_local void @__cxa_throw(i8*, i8*, i8*) local_unnamed_addr
+declare dso_local void @__cxa_throw(ptr, ptr, ptr) local_unnamed_addr
 
 define hidden i32 @_Z1fiiii(i32 %a, i32 %b, i32 %c, i32 %d) local_unnamed_addr #0 {
 entry:

diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll
index d1fdbee0cf29c..6bfaf3bb520a0 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll
@@ -21,19 +21,19 @@ target triple = "thumbv7m-arm-none-eabi"
 
 define hidden i32 @x() local_unnamed_addr #0 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
-  %5 = load volatile i32, i32* @f, align 4
-  %6 = load volatile i32, i32* @g, align 4
+  %4 = load volatile i32, ptr @e, align 4
+  %5 = load volatile i32, ptr @f, align 4
+  %6 = load volatile i32, ptr @g, align 4
   %div3 = sdiv i32 %5, %6
-  %7 = load volatile i32, i32* @h, align 4
-  %8 = load volatile i32, i32* @i, align 4
+  %7 = load volatile i32, ptr @h, align 4
+  %8 = load volatile i32, ptr @i, align 4
   %add2 = add i32 %div, 1
   %add4 = add i32 %add2, %4
   %add5 = add i32 %add4, %div3
@@ -44,19 +44,19 @@ entry:
 
 define hidden i32 @y() local_unnamed_addr #0 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
-  %5 = load volatile i32, i32* @f, align 4
-  %6 = load volatile i32, i32* @g, align 4
+  %4 = load volatile i32, ptr @e, align 4
+  %5 = load volatile i32, ptr @f, align 4
+  %6 = load volatile i32, ptr @g, align 4
   %div3 = sdiv i32 %5, %6
-  %7 = load volatile i32, i32* @h, align 4
-  %8 = load volatile i32, i32* @i, align 4
+  %7 = load volatile i32, ptr @h, align 4
+  %8 = load volatile i32, ptr @i, align 4
   %add2 = add i32 %div, 2
   %add4 = add i32 %add2, %4
   %add5 = add i32 %add4, %div3
@@ -67,19 +67,19 @@ entry:
 
 define hidden i32 @z() local_unnamed_addr #1 {
 entry:
-  %0 = load volatile i32, i32* @a, align 4
-  %1 = load volatile i32, i32* @b, align 4
+  %0 = load volatile i32, ptr @a, align 4
+  %1 = load volatile i32, ptr @b, align 4
   %add = add nsw i32 %1, %0
-  %2 = load volatile i32, i32* @c, align 4
-  %3 = load volatile i32, i32* @d, align 4
+  %2 = load volatile i32, ptr @c, align 4
+  %3 = load volatile i32, ptr @d, align 4
   %add1 = add nsw i32 %3, %2
   %div = sdiv i32 %add, %add1
-  %4 = load volatile i32, i32* @e, align 4
-  %5 = load volatile i32, i32* @f, align 4
-  %6 = load volatile i32, i32* @g, align 4
+  %4 = load volatile i32, ptr @e, align 4
+  %5 = load volatile i32, ptr @f, align 4
+  %6 = load volatile i32, ptr @g, align 4
   %div3 = sdiv i32 %5, %6
-  %7 = load volatile i32, i32* @h, align 4
-  %8 = load volatile i32, i32* @i, align 4
+  %7 = load volatile i32, ptr @h, align 4
+  %8 = load volatile i32, ptr @i, align 4
   %add2 = add i32 %div, 3
   %add4 = add i32 %add2, %4
   %add5 = add i32 %add4, %div3

diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-overalign.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-overalign.ll
index 1f3626585209d..371b703453fff 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-overalign.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-overalign.ll
@@ -17,19 +17,17 @@ target triple = "thumbv8.1m.main-arm-none-eabi"
 define hidden i32 @_Z1fv() local_unnamed_addr {
 entry:
   %a = alloca [4 x i32], align 32
-  %0 = bitcast [4 x i32]* %a to i8*
-  %arraydecay = getelementptr inbounds [4 x i32], [4 x i32]* %a, i32 0, i32 0
-  %call = call i32 @_Z1giPi(i32 4, i32* nonnull %arraydecay)
-  %1 = load i32, i32* %arraydecay, align 32
-  %arrayidx.1 = getelementptr inbounds [4 x i32], [4 x i32]* %a, i32 0, i32 1
-  %2 = load i32, i32* %arrayidx.1, align 4
-  %add.1 = add nsw i32 %2, %1
-  %arrayidx.2 = getelementptr inbounds [4 x i32], [4 x i32]* %a, i32 0, i32 2
-  %3 = load i32, i32* %arrayidx.2, align 8
-  %add.2 = add nsw i32 %3, %add.1
-  %arrayidx.3 = getelementptr inbounds [4 x i32], [4 x i32]* %a, i32 0, i32 3
-  %4 = load i32, i32* %arrayidx.3, align 4
-  %add.3 = add nsw i32 %4, %add.2
+  %call = call i32 @_Z1giPi(i32 4, ptr nonnull %a)
+  %0 = load i32, ptr %a, align 32
+  %arrayidx.1 = getelementptr inbounds [4 x i32], ptr %a, i32 0, i32 1
+  %1 = load i32, ptr %arrayidx.1, align 4
+  %add.1 = add nsw i32 %1, %0
+  %arrayidx.2 = getelementptr inbounds [4 x i32], ptr %a, i32 0, i32 2
+  %2 = load i32, ptr %arrayidx.2, align 8
+  %add.2 = add nsw i32 %2, %add.1
+  %arrayidx.3 = getelementptr inbounds [4 x i32], ptr %a, i32 0, i32 3
+  %3 = load i32, ptr %arrayidx.3, align 4
+  %add.3 = add nsw i32 %3, %add.2
   ret i32 %add.3
 }
 
@@ -56,7 +54,7 @@ entry:
 ; CHECK-NEXT: bx      lr
 
 
-declare dso_local i32 @_Z1giPi(i32, i32*) local_unnamed_addr
+declare dso_local i32 @_Z1giPi(i32, ptr) local_unnamed_addr
 
 !llvm.module.flags = !{!0, !1, !2}
 

diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-unsupported-arch.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-unsupported-arch.ll
index 979292a5b1f28..b9daac7d18f7b 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-unsupported-arch.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-unsupported-arch.ll
@@ -8,8 +8,8 @@
 define hidden i32 @f(i32 %x) #0 {
 entry:
   %x.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  %0 = load i32, i32* %x.addr, align 4
+  store i32 %x, ptr %x.addr, align 4
+  %0 = load i32, ptr %x.addr, align 4
   %sub = sub nsw i32 1, %0
   %call = call i32 @g(i32 %sub)
   %add = add nsw i32 1, %call

diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-varargs-1.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-varargs-1.ll
index 87c9e41acd6ed..0cee42d137fb4 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-varargs-1.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-varargs-1.ll
@@ -2,35 +2,32 @@
 target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
 target triple = "thumbv8.1m.main-arm-none-eabi"
 
-%"struct.std::__va_list" = type { i8* }
+%"struct.std::__va_list" = type { ptr }
 
 define hidden i32 @_Z1fiz(i32 %n, ...) local_unnamed_addr #0 {
 entry:
   %ap = alloca %"struct.std::__va_list", align 4
-  %0 = bitcast %"struct.std::__va_list"* %ap to i8*
-  call void @llvm.va_start(i8* nonnull %0)
+  call void @llvm.va_start(ptr nonnull %ap)
   %cmp7 = icmp sgt i32 %n, 0
   br i1 %cmp7, label %for.body.lr.ph, label %for.cond.cleanup
 
 for.body.lr.ph:                                   ; preds = %entry
-  %1 = getelementptr inbounds %"struct.std::__va_list", %"struct.std::__va_list"* %ap, i32 0, i32 0
-  %argp.cur.pre = load i8*, i8** %1, align 4
+  %argp.cur.pre = load ptr, ptr %ap, align 4
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %for.body, %entry
   %s.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
-  call void @llvm.va_end(i8* nonnull %0)
+  call void @llvm.va_end(ptr nonnull %ap)
   ret i32 %s.0.lcssa
 
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
-  %argp.cur = phi i8* [ %argp.cur.pre, %for.body.lr.ph ], [ %argp.next, %for.body ]
+  %argp.cur = phi ptr [ %argp.cur.pre, %for.body.lr.ph ], [ %argp.next, %for.body ]
   %i.09 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
   %s.08 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
-  %argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
-  store i8* %argp.next, i8** %1, align 4
-  %2 = bitcast i8* %argp.cur to i32*
-  %3 = load i32, i32* %2, align 4
-  %add = add nsw i32 %3, %s.08
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+  store ptr %argp.next, ptr %ap, align 4
+  %0 = load i32, ptr %argp.cur, align 4
+  %add = add nsw i32 %0, %s.08
   %inc = add nuw nsw i32 %i.09, 1
   %exitcond.not = icmp eq i32 %inc, %n
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
@@ -64,8 +61,8 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
 ; CHECK-NEXT: aut    r12, lr, sp
 ; CHECK-NEXT: bx    lr
 
-declare void @llvm.va_start(i8*) #1
-declare void @llvm.va_end(i8*) #1
+declare void @llvm.va_start(ptr) #1
+declare void @llvm.va_end(ptr) #1
 
 attributes #0 = { nounwind optsize}
 attributes #1 = { nounwind }

diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-varargs-2.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-varargs-2.ll
index e6069f6a1c908..3c36b6913581b 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-varargs-2.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-varargs-2.ll
@@ -16,34 +16,31 @@ target triple = "thumbv8.1m.main-arm-none-eabi"
 ;   return s;
 ; }
 
-%"struct.std::__va_list" = type { i8* }
+%"struct.std::__va_list" = type { ptr }
 
 define hidden i32 @_Z1fiz(i32 %n, ...) local_unnamed_addr #0 {
 entry:
   %ap = alloca %"struct.std::__va_list", align 4
-  %0 = bitcast %"struct.std::__va_list"* %ap to i8*
-  call void @llvm.va_start(i8* nonnull %0)
+  call void @llvm.va_start(ptr nonnull %ap)
   %cmp7 = icmp sgt i32 %n, 0
   br i1 %cmp7, label %for.body.lr.ph, label %for.cond.cleanup
 
 for.body.lr.ph:                                   ; preds = %entry
-  %1 = getelementptr inbounds %"struct.std::__va_list", %"struct.std::__va_list"* %ap, i32 0, i32 0
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %for.body, %entry
   %s.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
-  call void @llvm.va_end(i8* nonnull %0)
+  call void @llvm.va_end(ptr nonnull %ap)
   ret i32 %s.0.lcssa
 
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %i.09 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
   %s.08 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
-  %argp.cur = load i8*, i8** %1, align 4
-  %argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
-  store i8* %argp.next, i8** %1, align 4
-  %2 = bitcast i8* %argp.cur to i32*
-  %3 = load i32, i32* %2, align 4
-  %call = call i32 @_Z1gi(i32 %3)
+  %argp.cur = load ptr, ptr %ap, align 4
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+  store ptr %argp.next, ptr %ap, align 4
+  %0 = load i32, ptr %argp.cur, align 4
+  %call = call i32 @_Z1gi(i32 %0)
   %add = add nsw i32 %call, %s.08
   %inc = add nuw nsw i32 %i.09, 1
   %exitcond.not = icmp eq i32 %inc, %n
@@ -80,8 +77,8 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
 ; CHECK-NEXT:  aut    r12, lr, sp
 ; CHECK-NEXT:  bx     lr
 
-declare void @llvm.va_start(i8*) #1
-declare void @llvm.va_end(i8*) #1
+declare void @llvm.va_start(ptr) #1
+declare void @llvm.va_end(ptr) #1
 
 declare dso_local i32 @_Z1gi(i32) local_unnamed_addr
 

diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
index 898410de1520a..aa556b4418f58 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
@@ -16,7 +16,7 @@ target triple = "thumbv8.1m.main-arm-none-eabi"
 define hidden i32 @f(i32 %n) local_unnamed_addr #0 {
 entry:
   %vla = alloca i32, i32 %n, align 4
-  %call = call i32 @g(i32 %n, i32* nonnull %vla) #0
+  %call = call i32 @g(i32 %n, ptr nonnull %vla) #0
   %cmp8 = icmp sgt i32 %n, 0
   br i1 %cmp8, label %for.body.preheader, label %for.cond.cleanup
 
@@ -38,8 +38,8 @@ for.cond.cleanup.loopexit.unr-lcssa:              ; preds = %for.body, %for.body
   br i1 %lcmp.mod.not, label %for.cond.cleanup, label %for.body.epil
 
 for.body.epil:                                    ; preds = %for.cond.cleanup.loopexit.unr-lcssa
-  %arrayidx.epil = getelementptr inbounds i32, i32* %vla, i32 %i.010.unr
-  %2 = load i32, i32* %arrayidx.epil, align 4
+  %arrayidx.epil = getelementptr inbounds i32, ptr %vla, i32 %i.010.unr
+  %2 = load i32, ptr %arrayidx.epil, align 4
   %add.epil = add nsw i32 %2, %s.09.unr
   %epil.iter.cmp.not = icmp eq i32 %xtraiter, 1
   br i1 %epil.iter.cmp.not, label %for.cond.cleanup, label %for.body.epil.1
@@ -52,20 +52,20 @@ for.body:                                         ; preds = %for.body, %for.body
   %i.010 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
   %s.09 = phi i32 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
   %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %vla, i32 %i.010
-  %3 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %vla, i32 %i.010
+  %3 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %3, %s.09
   %inc = or i32 %i.010, 1
-  %arrayidx.1 = getelementptr inbounds i32, i32* %vla, i32 %inc
-  %4 = load i32, i32* %arrayidx.1, align 4
+  %arrayidx.1 = getelementptr inbounds i32, ptr %vla, i32 %inc
+  %4 = load i32, ptr %arrayidx.1, align 4
   %add.1 = add nsw i32 %4, %add
   %inc.1 = or i32 %i.010, 2
-  %arrayidx.2 = getelementptr inbounds i32, i32* %vla, i32 %inc.1
-  %5 = load i32, i32* %arrayidx.2, align 4
+  %arrayidx.2 = getelementptr inbounds i32, ptr %vla, i32 %inc.1
+  %5 = load i32, ptr %arrayidx.2, align 4
   %add.2 = add nsw i32 %5, %add.1
   %inc.2 = or i32 %i.010, 3
-  %arrayidx.3 = getelementptr inbounds i32, i32* %vla, i32 %inc.2
-  %6 = load i32, i32* %arrayidx.3, align 4
+  %arrayidx.3 = getelementptr inbounds i32, ptr %vla, i32 %inc.2
+  %6 = load i32, ptr %arrayidx.3, align 4
   %add.3 = add nsw i32 %6, %add.2
   %inc.3 = add nuw nsw i32 %i.010, 4
   %niter.nsub.3 = add i32 %niter, -4
@@ -74,16 +74,16 @@ for.body:                                         ; preds = %for.body, %for.body
 
 for.body.epil.1:                                  ; preds = %for.body.epil
   %inc.epil = add nuw nsw i32 %i.010.unr, 1
-  %arrayidx.epil.1 = getelementptr inbounds i32, i32* %vla, i32 %inc.epil
-  %7 = load i32, i32* %arrayidx.epil.1, align 4
+  %arrayidx.epil.1 = getelementptr inbounds i32, ptr %vla, i32 %inc.epil
+  %7 = load i32, ptr %arrayidx.epil.1, align 4
   %add.epil.1 = add nsw i32 %7, %add.epil
   %epil.iter.cmp.1.not = icmp eq i32 %xtraiter, 2
   br i1 %epil.iter.cmp.1.not, label %for.cond.cleanup, label %for.body.epil.2
 
 for.body.epil.2:                                  ; preds = %for.body.epil.1
   %inc.epil.1 = add nuw nsw i32 %i.010.unr, 2
-  %arrayidx.epil.2 = getelementptr inbounds i32, i32* %vla, i32 %inc.epil.1
-  %8 = load i32, i32* %arrayidx.epil.2, align 4
+  %arrayidx.epil.2 = getelementptr inbounds i32, ptr %vla, i32 %inc.epil.1
+  %8 = load i32, ptr %arrayidx.epil.2, align 4
   %add.epil.2 = add nsw i32 %8, %add.epil.1
   br label %for.cond.cleanup
 }
@@ -114,7 +114,7 @@ for.body.epil.2:                                  ; preds = %for.body.epil.1
 ; CHECK-NEXT: aut    r12, lr, sp
 ; CHECK-NEXT: bx     lr
 
-declare dso_local i32 @g(i32, i32*) local_unnamed_addr #0
+declare dso_local i32 @g(i32, ptr) local_unnamed_addr #0
 
 attributes #0 = { nounwind }
 

diff --git a/llvm/test/CodeGen/Thumb2/pic-load.ll b/llvm/test/CodeGen/Thumb2/pic-load.ll
index 5fa4d93fd7aa2..1f2a7d891671d 100644
--- a/llvm/test/CodeGen/Thumb2/pic-load.ll
+++ b/llvm/test/CodeGen/Thumb2/pic-load.ll
@@ -2,12 +2,12 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -relocation-model=pic -o - | FileCheck %s --check-prefix=CHECK-PIC
 ; RUN: llc < %s -mtriple=thumbv7-apple-ios -relocation-model=pic -mcpu=swift -mattr=+no-movt | FileCheck %s --check-prefix=CHECK-NOMOVT
 
-%struct.anon = type { void ()* }
-%struct.one_atexit_routine = type { %struct.anon, i32, i8* }
-@__dso_handle = external global { }		; <{ }*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (void ()*)* @atexit to i8*)], section "llvm.metadata"		; <[1 x i8*]*> [#uses=0]
+%struct.anon = type { ptr }
+%struct.one_atexit_routine = type { %struct.anon, i32, ptr }
+@__dso_handle = external global { }		; <ptr> [#uses=1]
+@llvm.used = appending global [1 x ptr] [ptr @atexit], section "llvm.metadata"		; <ptr> [#uses=0]
 
-define i32 @atexit(void ()* %func) {
+define i32 @atexit(ptr %func) {
 ; CHECK-PIC-LABEL: atexit:
 ; CHECK-PIC:       @ %bb.0: @ %entry
 ; CHECK-PIC-NEXT:    str lr, [sp, #-4]!
@@ -48,13 +48,13 @@ define i32 @atexit(void ()* %func) {
 ; CHECK-NOMOVT-NEXT:    .long L___dso_handle$non_lazy_ptr-(LPC0_0+4)
 ; CHECK-NOMOVT-NEXT:    .end_data_region
 entry:
-  %r = alloca %struct.one_atexit_routine, align 4		; <%struct.one_atexit_routine*> [#uses=3]
-  %0 = getelementptr %struct.one_atexit_routine, %struct.one_atexit_routine* %r, i32 0, i32 0, i32 0		; <void ()**> [#uses=1]
-  store void ()* %func, void ()** %0, align 4
-  %1 = getelementptr %struct.one_atexit_routine, %struct.one_atexit_routine* %r, i32 0, i32 1		; <i32*> [#uses=1]
-  store i32 0, i32* %1, align 4
-  %2 = call  i32 @atexit_common(%struct.one_atexit_routine* %r, i8* bitcast ({ }* @__dso_handle to i8*)) nounwind		; <i32> [#uses=1]
+  %r = alloca %struct.one_atexit_routine, align 4		; <ptr> [#uses=3]
+  %0 = getelementptr %struct.one_atexit_routine, ptr %r, i32 0, i32 0, i32 0		; <ptr> [#uses=1]
+  store ptr %func, ptr %0, align 4
+  %1 = getelementptr %struct.one_atexit_routine, ptr %r, i32 0, i32 1		; <ptr> [#uses=1]
+  store i32 0, ptr %1, align 4
+  %2 = call  i32 @atexit_common(ptr %r, ptr @__dso_handle) nounwind		; <i32> [#uses=1]
   ret i32 %2
 }
 
-declare i32 @atexit_common(%struct.one_atexit_routine*, i8*) nounwind
+declare i32 @atexit_common(ptr, ptr) nounwind

diff --git a/llvm/test/CodeGen/Thumb2/pr52817.ll b/llvm/test/CodeGen/Thumb2/pr52817.ll
index ce1af0593f85a..87615f0a1f7ef 100644
--- a/llvm/test/CodeGen/Thumb2/pr52817.ll
+++ b/llvm/test/CodeGen/Thumb2/pr52817.ll
@@ -9,7 +9,7 @@ target triple = "thumbv7-apple-ios9.0.0"
 %struct.ham = type { [1024 x i32], %struct.zot }
 %struct.zot = type { [1 x i32], [1 x [32 x i32]] }
 
-define i32 @test(%struct.ham* %arg, %struct.zot* %arg1, i32* %arg2) #0 !dbg !6 {
+define i32 @test(ptr %arg, ptr %arg1, ptr %arg2) #0 !dbg !6 {
 ; CHECK-LABEL: test:
 ; CHECK:       Lfunc_begin0:
 ; CHECK-NEXT:    .file 1 "/" "test.cpp"
@@ -53,25 +53,25 @@ bb3:                                              ; preds = %bb14, %bb
   %tmp5 = add i32 %tmp4, 1
   %tmp6 = shl nuw i32 1, %tmp4
   %tmp8 = and i32 %tmp4, %tmp6
-  store i32 0, i32* %arg2, align 4
-  %tmp10 = getelementptr inbounds %struct.ham, %struct.ham* %arg, i32 0, i32 1, i32 1, i32 0, i32 %tmp5
+  store i32 0, ptr %arg2, align 4
+  %tmp10 = getelementptr inbounds %struct.ham, ptr %arg, i32 0, i32 1, i32 1, i32 0, i32 %tmp5
   br i1 undef, label %bb11, label %bb13
 
 bb11:                                             ; preds = %bb3
-  %tmp12 = load i32, i32* null, align 4
+  %tmp12 = load i32, ptr null, align 4
   br label %bb14
 
 bb13:                                             ; preds = %bb3
-  call void @llvm.dbg.value(metadata !DIArgList(%struct.ham* %arg, i32 %tmp5), metadata !11, metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_plus_uconst, 135168, DW_OP_LLVM_arg, 1, DW_OP_constu, 4, DW_OP_mul, DW_OP_plus, DW_OP_plus_uconst, 4, DW_OP_stack_value)), !dbg !14
-  store i32 0, i32* %tmp10, align 4, !dbg !16
+  call void @llvm.dbg.value(metadata !DIArgList(ptr %arg, i32 %tmp5), metadata !11, metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_plus_uconst, 135168, DW_OP_LLVM_arg, 1, DW_OP_constu, 4, DW_OP_mul, DW_OP_plus, DW_OP_plus_uconst, 4, DW_OP_stack_value)), !dbg !14
+  store i32 0, ptr %tmp10, align 4, !dbg !16
   br label %bb14
 
 bb14:                                             ; preds = %bb13, %bb11
   %tmp15 = phi i32 [ 1, %bb11 ], [ 0, %bb13 ]
-  %tmp16 = getelementptr inbounds %struct.zot, %struct.zot* %arg1, i32 0, i32 1, i32 0, i32 %tmp5
-  %tmp17 = load i32, i32* %tmp16, align 4
+  %tmp16 = getelementptr inbounds %struct.zot, ptr %arg1, i32 0, i32 1, i32 0, i32 %tmp5
+  %tmp17 = load i32, ptr %tmp16, align 4
   %tmp18 = or i32 %tmp17, %tmp15
-  store i32 %tmp18, i32* %tmp10, align 4
+  store i32 %tmp18, ptr %tmp10, align 4
   br label %bb3
 }
 

diff --git a/llvm/test/CodeGen/Thumb2/schedm7-hazard.ll b/llvm/test/CodeGen/Thumb2/schedm7-hazard.ll
index 9572300d8e228..d3e31d192d570 100644
--- a/llvm/test/CodeGen/Thumb2/schedm7-hazard.ll
+++ b/llvm/test/CodeGen/Thumb2/schedm7-hazard.ll
@@ -7,7 +7,7 @@
 ; their latency. But will bank conflict to TCM so are scheduled in different
 ; cycles.
 
-define i32 @test(i32* %x0, i32 %y, i32 %z) {
+define i32 @test(ptr %x0, i32 %y, i32 %z) {
 ; CHECK-LABEL: test:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    ldr r3, [r0]
@@ -27,12 +27,12 @@ define i32 @test(i32* %x0, i32 %y, i32 %z) {
 ; NOBANK-NEXT:    muls r0, r1, r0
 ; NOBANK-NEXT:    bx lr
 entry:
-  %0 = load i32, i32* %x0, align 4
+  %0 = load i32, ptr %x0, align 4
   %mul3 = add nsw i32 %0, 1
   %mul = sub nsw i32 %mul3, %y
   %sub = sub nsw i32 %mul, %z
-  %arrayidx1 = getelementptr inbounds i32, i32* %x0, i32 2
-  %1 = load i32, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %x0, i32 2
+  %1 = load i32, ptr %arrayidx1, align 4
   %mul2 = mul nsw i32 %sub, %1
   ret i32 %mul2
 }

diff --git a/llvm/test/CodeGen/Thumb2/segmented-stacks.ll b/llvm/test/CodeGen/Thumb2/segmented-stacks.ll
index 70892073dea6e..6d3dbf4eb38f6 100644
--- a/llvm/test/CodeGen/Thumb2/segmented-stacks.ll
+++ b/llvm/test/CodeGen/Thumb2/segmented-stacks.ll
@@ -4,7 +4,7 @@
 
 
 ; Just to prevent the alloca from being optimized away
-declare void @dummy_use(i32*, i32)
+declare void @dummy_use(ptr, i32)
 
 define void @test_basic() #0 {
 ; THUMB-LABEL: test_basic:
@@ -63,13 +63,13 @@ define void @test_basic() #0 {
 ; ARM-NEXT:    add sp, sp, #40
 ; ARM-NEXT:    pop {r11, pc}
   %mem = alloca i32, i32 10
-  call void @dummy_use (i32* %mem, i32 10)
+  call void @dummy_use (ptr %mem, i32 10)
   ret void
 }
 
 define void @test_large() #0 {
         %mem = alloca i32, i32 10000
-        call void @dummy_use (i32* %mem, i32 0)
+        call void @dummy_use (ptr %mem, i32 0)
         ret void
 
 ; THUMB-LABEL:   test_large:
@@ -123,7 +123,7 @@ define void @test_large() #0 {
 
 define fastcc void @test_fastcc_large() #0 {
         %mem = alloca i32, i32 10000
-        call void @dummy_use (i32* %mem, i32 0)
+        call void @dummy_use (ptr %mem, i32 0)
         ret void
 
 ; THUMB-LABEL:   test_fastcc_large:

diff --git a/llvm/test/CodeGen/Thumb2/setjmp_longjmp.ll b/llvm/test/CodeGen/Thumb2/setjmp_longjmp.ll
index c7fef12e06d39..4e700ce493b35 100644
--- a/llvm/test/CodeGen/Thumb2/setjmp_longjmp.ll
+++ b/llvm/test/CodeGen/Thumb2/setjmp_longjmp.ll
@@ -2,10 +2,10 @@
 ; RUN: llc %s -o - | FileCheck %s
 target triple = "thumbv7-apple-ios"
 
-declare i32 @llvm.eh.sjlj.setjmp(i8*)
-declare void @llvm.eh.sjlj.longjmp(i8*)
-declare i8* @llvm.frameaddress(i32)
-declare i8* @llvm.stacksave()
+declare i32 @llvm.eh.sjlj.setjmp(ptr)
+declare void @llvm.eh.sjlj.longjmp(ptr)
+declare ptr @llvm.frameaddress(i32)
+declare ptr @llvm.stacksave()
 @g = external global i32
 
 define void @double_foobar() {
@@ -80,46 +80,44 @@ define void @double_foobar() {
 ; CHECK-NEXT:    ldr r7, [r0]
 ; CHECK-NEXT:    bx r1
 entry:
-  %buf = alloca [5 x i8*], align 4
-  %bufptr = bitcast [5 x i8*]* %buf to i8*
-  %arraydecay = getelementptr inbounds [5 x i8*], [5 x i8*]* %buf, i32 0, i32 0
+  %buf = alloca [5 x ptr], align 4
 
-  %fa = tail call i8* @llvm.frameaddress(i32 0)
-  store i8* %fa, i8** %arraydecay, align 4
-  %ss = tail call i8* @llvm.stacksave()
-  %ssgep = getelementptr [5 x i8*], [5 x i8*]* %buf, i32 0, i32 2
-  store i8* %ss, i8** %ssgep, align 4
+  %fa = tail call ptr @llvm.frameaddress(i32 0)
+  store ptr %fa, ptr %buf, align 4
+  %ss = tail call ptr @llvm.stacksave()
+  %ssgep = getelementptr [5 x ptr], ptr %buf, i32 0, i32 2
+  store ptr %ss, ptr %ssgep, align 4
 
-  %setjmpres = call i32 @llvm.eh.sjlj.setjmp(i8* %bufptr)
+  %setjmpres = call i32 @llvm.eh.sjlj.setjmp(ptr %buf)
   %tobool = icmp ne i32 %setjmpres, 0
   br i1 %tobool, label %if.then, label %if.else
 
 if.then:
-  store volatile i32 1, i32* @g, align 4
+  store volatile i32 1, ptr @g, align 4
   br label %if.end
 
 if.else:
-  store volatile i32 0, i32* @g, align 4
-  call void @llvm.eh.sjlj.longjmp(i8* %bufptr)
+  store volatile i32 0, ptr @g, align 4
+  call void @llvm.eh.sjlj.longjmp(ptr %buf)
   unreachable
 
 if.end:
-  %fa2 = tail call i8* @llvm.frameaddress(i32 0)
-  store i8* %fa2, i8** %arraydecay, align 4
-  %ss2 = tail call i8* @llvm.stacksave()
-  store i8* %ss2, i8** %ssgep, align 4
+  %fa2 = tail call ptr @llvm.frameaddress(i32 0)
+  store ptr %fa2, ptr %buf, align 4
+  %ss2 = tail call ptr @llvm.stacksave()
+  store ptr %ss2, ptr %ssgep, align 4
 
-  %setjmpres2 = call i32 @llvm.eh.sjlj.setjmp(i8* %bufptr)
+  %setjmpres2 = call i32 @llvm.eh.sjlj.setjmp(ptr %buf)
   %tobool2 = icmp ne i32 %setjmpres2, 0
   br i1 %tobool2, label %if2.then, label %if2.else
 
 if2.then:
-  store volatile i32 3, i32* @g, align 4
+  store volatile i32 3, ptr @g, align 4
   br label %if2.end
 
 if2.else:
-  store volatile i32 2, i32* @g, align 4
-  call void @llvm.eh.sjlj.longjmp(i8* %bufptr)
+  store volatile i32 2, ptr @g, align 4
+  call void @llvm.eh.sjlj.longjmp(ptr %buf)
   unreachable
 
 if2.end:

diff --git a/llvm/test/CodeGen/Thumb2/shift_parts.ll b/llvm/test/CodeGen/Thumb2/shift_parts.ll
index d32b386c5910e..b4ac405d82ed5 100644
--- a/llvm/test/CodeGen/Thumb2/shift_parts.ll
+++ b/llvm/test/CodeGen/Thumb2/shift_parts.ll
@@ -195,7 +195,7 @@ entry:
 
 %struct.bar = type { i16, i8, [5 x i8] }
 
-define arm_aapcs_vfpcc void @fn1(%struct.bar* nocapture %a) {
+define arm_aapcs_vfpcc void @fn1(ptr nocapture %a) {
 ; CHECK-MVE-LABEL: fn1:
 ; CHECK-MVE:       @ %bb.0: @ %entry
 ; CHECK-MVE-NEXT:    ldr r2, [r0, #4]
@@ -214,17 +214,16 @@ define arm_aapcs_vfpcc void @fn1(%struct.bar* nocapture %a) {
 ; CHECK-NON-MVE-NEXT:    str.w r1, [r0, #3]
 ; CHECK-NON-MVE-NEXT:    bx lr
 entry:
-  %carey = getelementptr inbounds %struct.bar, %struct.bar* %a, i32 0, i32 2
-  %0 = bitcast [5 x i8]* %carey to i40*
-  %bf.load = load i40, i40* %0, align 1
+  %carey = getelementptr inbounds %struct.bar, ptr %a, i32 0, i32 2
+  %bf.load = load i40, ptr %carey, align 1
   %bf.clear = and i40 %bf.load, -256
-  store i40 %bf.clear, i40* %0, align 1
+  store i40 %bf.clear, ptr %carey, align 1
   ret void
 }
 
 %struct.a = type { i96 }
 
-define void @lsll_128bit_shift(%struct.a* nocapture %x) local_unnamed_addr #0 {
+define void @lsll_128bit_shift(ptr nocapture %x) local_unnamed_addr #0 {
 ; CHECK-LABEL: lsll_128bit_shift:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    movs r1, #0
@@ -232,16 +231,15 @@ define void @lsll_128bit_shift(%struct.a* nocapture %x) local_unnamed_addr #0 {
 ; CHECK-NEXT:    str r1, [r0, #8]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast %struct.a* %x to i128*
-  %bf.load = load i128, i128* %0, align 8
+  %bf.load = load i128, ptr %x, align 8
   %bf.clear4 = and i128 %bf.load, -79228162514264337593543950336
-  store i128 %bf.clear4, i128* %0, align 8
+  store i128 %bf.clear4, ptr %x, align 8
   ret void
 }
 
 %struct.b = type { i184 }
 
-define void @lsll_256bit_shift(%struct.b* nocapture %x) local_unnamed_addr #0 {
+define void @lsll_256bit_shift(ptr nocapture %x) local_unnamed_addr #0 {
 ; CHECK-LABEL: lsll_256bit_shift:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    movs r1, #0
@@ -253,10 +251,9 @@ define void @lsll_256bit_shift(%struct.b* nocapture %x) local_unnamed_addr #0 {
 ; CHECK-NEXT:    str r1, [r0, #20]
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = bitcast %struct.b* %x to i192*
-  %bf.load = load i192, i192* %0, align 8
+  %bf.load = load i192, ptr %x, align 8
   %bf.clear4 = and i192 %bf.load, -24519928653854221733733552434404946937899825954937634816
-  store i192 %bf.clear4, i192* %0, align 8
+  store i192 %bf.clear4, ptr %x, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Thumb2/stack_guard_remat.ll b/llvm/test/CodeGen/Thumb2/stack_guard_remat.ll
index 4d96dc277f0dc..98a15ec9ade1a 100644
--- a/llvm/test/CodeGen/Thumb2/stack_guard_remat.ll
+++ b/llvm/test/CodeGen/Thumb2/stack_guard_remat.ll
@@ -23,21 +23,19 @@
 ; Function Attrs: nounwind ssp
 define i32 @test_stack_guard_remat() #0 {
   %a1 = alloca [256 x i32], align 4
-  %1 = bitcast [256 x i32]* %a1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 1024, i8* %1)
-  %2 = getelementptr inbounds [256 x i32], [256 x i32]* %a1, i32 0, i32 0
-  call void @foo3(i32* %2) #3
+  call void @llvm.lifetime.start.p0(i64 1024, ptr %a1)
+  call void @foo3(ptr %a1) #3
   call void asm sideeffect "foo2", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{sp},~{lr}"()
-  call void @llvm.lifetime.end.p0i8(i64 1024, i8* %1)
+  call void @llvm.lifetime.end.p0(i64 1024, ptr %a1)
   ret i32 0
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
 
-declare void @foo3(i32*)
+declare void @foo3(ptr)
 
 ; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
 
 attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff --git a/llvm/test/CodeGen/Thumb2/tail-call-r9.ll b/llvm/test/CodeGen/Thumb2/tail-call-r9.ll
index 33cbd3d37c991..b09fe71335e58 100644
--- a/llvm/test/CodeGen/Thumb2/tail-call-r9.ll
+++ b/llvm/test/CodeGen/Thumb2/tail-call-r9.ll
@@ -1,13 +1,13 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-m3 | FileCheck %s
 
-@foo = common global void ()* null, align 4
+@foo = common global ptr null, align 4
 
 ; Make sure in the presence of a tail call, r9 doesn't get used to hold
 ; the destination address. It's callee-saved in AAPCS.
 define arm_aapcscc void @test(i32 %a) nounwind {
 ; CHECK-LABEL: test:
 ; CHECK-NOT: bx r9
-  %tmp = load void ()*, void ()** @foo, align 4
+  %tmp = load ptr, ptr @foo, align 4
   tail call void asm sideeffect "", "~{r0},~{r1},~{r2},~{r3},~{r12}"() nounwind
   tail call arm_aapcscc void %tmp() nounwind
   ret void

diff --git a/llvm/test/CodeGen/Thumb2/thumb2-branch.ll b/llvm/test/CodeGen/Thumb2/thumb2-branch.ll
index 332ed50ede6f2..720116c7d06ac 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-branch.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-branch.ll
@@ -5,7 +5,7 @@
 
 declare void @foo()
 
-define i32 @f1(i32 %a, i32 %b, i32* %v) {
+define i32 @f1(i32 %a, i32 %b, ptr %v) {
 entry:
 ; CHECK-LABEL: f1:
 ; CHECK: bne LBB
@@ -14,7 +14,7 @@ entry:
 
 cond_true:              ; preds = %entry
         call void @foo()
-        store i32 0, i32* %v
+        store i32 0, ptr %v
         ret i32 0
 
 return:         ; preds = %entry
@@ -22,7 +22,7 @@ return:         ; preds = %entry
         ret i32 1
 }
 
-define i32 @f2(i32 %a, i32 %b, i32* %v) {
+define i32 @f2(i32 %a, i32 %b, ptr %v) {
 entry:
 ; CHECK-LABEL: f2:
 ; CHECK: bge LBB
@@ -31,7 +31,7 @@ entry:
 
 cond_true:              ; preds = %entry
         call void @foo()
-        store i32 0, i32* %v
+        store i32 0, ptr %v
         ret i32 0
 
 return:         ; preds = %entry
@@ -39,7 +39,7 @@ return:         ; preds = %entry
         ret i32 1
 }
 
-define i32 @f3(i32 %a, i32 %b, i32* %v) {
+define i32 @f3(i32 %a, i32 %b, ptr %v) {
 entry:
 ; CHECK-LABEL: f3:
 ; CHECK: bhs LBB
@@ -48,7 +48,7 @@ entry:
 
 cond_true:              ; preds = %entry
         call void @foo()
-        store i32 0, i32* %v
+        store i32 0, ptr %v
         ret i32 0
 
 return:         ; preds = %entry
@@ -56,7 +56,7 @@ return:         ; preds = %entry
         ret i32 1
 }
 
-define i32 @f4(i32 %a, i32 %b, i32* %v) {
+define i32 @f4(i32 %a, i32 %b, ptr %v) {
 entry:
 ; CHECK-LABEL: f4:
 ; CHECK: blo LBB
@@ -65,7 +65,7 @@ entry:
 
 cond_true:              ; preds = %entry
         call void @foo()
-        store i32 0, i32* %v
+        store i32 0, ptr %v
         ret i32 0
 
 return:         ; preds = %entry

diff --git a/llvm/test/CodeGen/Thumb2/thumb2-call-tc.ll b/llvm/test/CodeGen/Thumb2/thumb2-call-tc.ll
index 96f63ba9ac0b6..1fec1b895e08f 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-call-tc.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-call-tc.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=thumbv7-linux -mattr=+thumb2 | FileCheck %s -check-prefix=LINUX
 ; XFAIL: *
 
-@t = weak global i32 ()* null           ; <i32 ()**> [#uses=1]
+@t = weak global ptr null           ; <ptr> [#uses=1]
 
 declare void @g(i32, i32, i32, i32)
 
@@ -22,7 +22,7 @@ define void @h() {
 
 ; LINUX-LABEL: h:
 ; LINUX: bx r0 @ TAILCALL
-        %tmp = load i32 ()*, i32 ()** @t         ; <i32 ()*> [#uses=1]
+        %tmp = load ptr, ptr @t         ; <ptr> [#uses=1]
         %tmp.upgrd.2 = tail call i32 %tmp( )            ; <i32> [#uses=0]
         ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/thumb2-call.ll b/llvm/test/CodeGen/Thumb2/thumb2-call.ll
index e06df642a93ac..2248e18f577fb 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-call.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-call.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+thumb2 | FileCheck %s
 ; RUN: llc < %s -mtriple=thumbv7-linux -mattr=+thumb2 | FileCheck %s
 
-@t = weak global i32 ()* null           ; <i32 ()**> [#uses=1]
+@t = weak global ptr null           ; <ptr> [#uses=1]
 
 declare void @g(i32, i32, i32, i32)
 
@@ -15,7 +15,7 @@ define void @f() {
 define void @h() {
 ; CHECK-LABEL: h:
 ; CHECK: blx r0
-        %tmp = load i32 ()*, i32 ()** @t         ; <i32 ()*> [#uses=1]
+        %tmp = load ptr, ptr @t         ; <ptr> [#uses=1]
         %tmp.upgrd.2 = call i32 %tmp( )            ; <i32> [#uses=0]
         ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/thumb2-cbnz.ll b/llvm/test/CodeGen/Thumb2/thumb2-cbnz.ll
index c1a53825e3b10..6ff9470483deb 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-cbnz.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-cbnz.ll
@@ -29,7 +29,6 @@ bb9:                                              ; preds = %bb7
   br label %bb11
 
 bb11:                                             ; preds = %bb9, %bb7
-  %1 = getelementptr i32, i32* undef, i32 0
-  store i32 0, i32* %1
+  store i32 0, ptr undef
   ret void
 }

diff --git a/llvm/test/CodeGen/Thumb2/thumb2-cpsr-liveness.ll b/llvm/test/CodeGen/Thumb2/thumb2-cpsr-liveness.ll
index 7987859889387..b6314bc62fbbf 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-cpsr-liveness.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-cpsr-liveness.ll
@@ -3,8 +3,6 @@
 define i32 @test_cpsr() {
 entry:
   %a = alloca [10 x i32], align 4
-  %0 = bitcast [10 x i32]* %a to i8*
-  %arrayidx.gep = getelementptr [10 x i32], [10 x i32]* %a, i32 0, i32 0
   br label %for.body
 
 for.cond.cleanup:
@@ -12,13 +10,13 @@ for.cond.cleanup:
   ret i32 %c.1.reg2mem.0.lcssa
 
 for.body:
-  %1 = phi i32 [ 0, %entry ], [ %.pre, %for.inc.for.body_crit_edge ]
+  %0 = phi i32 [ 0, %entry ], [ %.pre, %for.inc.for.body_crit_edge ]
   %c.018.reg2mem.0 = phi i32 [ 0, %entry ], [ %c.1.reg2mem.0, %for.inc.for.body_crit_edge ]
   %b.017.reg2mem.0 = phi double [ 0.000000e+00, %entry ], [ %b.1.reg2mem.0, %for.inc.for.body_crit_edge ]
-  %arrayidx.phi = phi i32* [ %arrayidx.gep, %entry ], [ %arrayidx.inc, %for.inc.for.body_crit_edge ]
+  %arrayidx.phi = phi ptr [ %a, %entry ], [ %arrayidx.inc, %for.inc.for.body_crit_edge ]
   %i.019 = phi i32 [ 0, %entry ], [ %inc, %for.inc.for.body_crit_edge ]
-  %cmp1 = icmp slt i32 %1, 10
-  %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
+  %cmp1 = icmp slt i32 %0, 10
+  %arrayidx.inc = getelementptr i32, ptr %arrayidx.phi, i32 1
   br i1 %cmp1, label %for.inc, label %if.end
 
 if.end:
@@ -36,6 +34,6 @@ for.inc:
 
 for.inc.for.body_crit_edge:
   %inc = add nuw nsw i32 %i.019, 1
-  %.pre = load i32, i32* %arrayidx.inc, align 4
+  %.pre = load i32, ptr %arrayidx.inc, align 4
   br label %for.body
 }

diff --git a/llvm/test/CodeGen/Thumb2/thumb2-execute-only-prologue.ll b/llvm/test/CodeGen/Thumb2/thumb2-execute-only-prologue.ll
index 767a2889c5b21..0d3e7882f1b8c 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-execute-only-prologue.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-execute-only-prologue.ll
@@ -12,8 +12,7 @@ entry:
 ; CHECK: .LCPI0_0:
 ; CHECK-NEXT:    .long 4294965696
   %a = alloca [400 x i32], align 4
-  %arraydecay = getelementptr inbounds [400 x i32], [400 x i32]* %a, i32 0, i32 0
-  call void @bar(i32* %arraydecay)
+  call void @bar(ptr %a)
   ret void
 }
 
@@ -28,11 +27,10 @@ entry:
 ; CHECK-NEXT:    .pad #1600
 ; CHECK-NEXT:    add sp, r6
   %a = alloca [400 x i32], align 4
-  %arraydecay = getelementptr inbounds [400 x i32], [400 x i32]* %a, i32 0, i32 0
-  call void @bar(i32* %arraydecay)
+  call void @bar(ptr %a)
   ret void
 }
 
-declare dso_local void @bar(i32*)
+declare dso_local void @bar(ptr)
 
 attributes #0 = { noinline optnone "target-features"="+armv8-m.base,+execute-only,+thumb-mode" }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll b/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll
index ebc12dc3c1d53..6e31d6f5f10bf 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll
@@ -59,12 +59,12 @@ bb17:		; preds = %cond_false, %cond_true, %entry
 	ret i32 %a_addr.026.1
 }
 
-@x = external global i32*		; <i32**> [#uses=1]
+@x = external global ptr		; <ptr> [#uses=1]
 
 define void @foo(i32 %a) nounwind {
 entry:
-	%tmp = load i32*, i32** @x		; <i32*> [#uses=1]
-	store i32 %a, i32* %tmp
+	%tmp = load ptr, ptr @x		; <ptr> [#uses=1]
+	store i32 %a, ptr %tmp
 	ret void
 }
 

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll b/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll
index 95535a45b364f..8ef9578c75f0b 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll
@@ -212,7 +212,7 @@ bb17:		; preds = %cond_false, %cond_true, %entry
 	ret i32 %a_addr.026.1
 }
 
-@x = external global i32*		; <i32**> [#uses=1]
+@x = external global ptr		; <ptr> [#uses=1]
 
 define void @foo(i32 %a) nounwind {
 ; V01-LABEL: foo:
@@ -234,8 +234,8 @@ define void @foo(i32 %a) nounwind {
 ; V23-NEXT:    str r0, [r1]
 ; V23-NEXT:    bx lr
 entry:
-	%tmp = load i32*, i32** @x		; <i32*> [#uses=1]
-	store i32 %a, i32* %tmp
+	%tmp = load ptr, ptr @x		; <ptr> [#uses=1]
+	store i32 %a, ptr %tmp
 	ret void
 }
 

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-ifcvt2.ll b/llvm/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
index 0d94d314ce00b..62e6605a26a85 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
@@ -26,9 +26,9 @@ declare i32 @bar(...)
 
 ; FIXME: Need post-ifcvt branch folding to get rid of the extra br at end of BB1.
 
-	%struct.quad_struct = type { i32, i32, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct* }
+	%struct.quad_struct = type { i32, i32, ptr, ptr, ptr, ptr, ptr }
 
-define fastcc i32 @CountTree(%struct.quad_struct* %tree) {
+define fastcc i32 @CountTree(ptr %tree) {
 entry:
 ; CHECK-LABEL: CountTree:
 ; CHECK: bne
@@ -38,36 +38,36 @@ entry:
 	br label %tailrecurse
 
 tailrecurse:		; preds = %bb, %entry
-	%tmp6 = load %struct.quad_struct*, %struct.quad_struct** null		; <%struct.quad_struct*> [#uses=1]
-	%tmp9 = load %struct.quad_struct*, %struct.quad_struct** null		; <%struct.quad_struct*> [#uses=2]
-	%tmp12 = load %struct.quad_struct*, %struct.quad_struct** null		; <%struct.quad_struct*> [#uses=1]
-	%tmp14 = icmp eq %struct.quad_struct* null, null		; <i1> [#uses=1]
-	%tmp17 = icmp eq %struct.quad_struct* %tmp6, null		; <i1> [#uses=1]
-	%tmp23 = icmp eq %struct.quad_struct* %tmp9, null		; <i1> [#uses=1]
-	%tmp29 = icmp eq %struct.quad_struct* %tmp12, null		; <i1> [#uses=1]
+	%tmp6 = load ptr, ptr null		; <ptr> [#uses=1]
+	%tmp9 = load ptr, ptr null		; <ptr> [#uses=2]
+	%tmp12 = load ptr, ptr null		; <ptr> [#uses=1]
+	%tmp14 = icmp eq ptr null, null		; <i1> [#uses=1]
+	%tmp17 = icmp eq ptr %tmp6, null		; <i1> [#uses=1]
+	%tmp23 = icmp eq ptr %tmp9, null		; <i1> [#uses=1]
+	%tmp29 = icmp eq ptr %tmp12, null		; <i1> [#uses=1]
 	%bothcond = and i1 %tmp17, %tmp14		; <i1> [#uses=1]
 	%bothcond1 = and i1 %bothcond, %tmp23		; <i1> [#uses=1]
 	%bothcond2 = and i1 %bothcond1, %tmp29		; <i1> [#uses=1]
 	br i1 %bothcond2, label %return, label %bb
 
 bb:		; preds = %tailrecurse
-	%tmp41 = tail call fastcc i32 @CountTree( %struct.quad_struct* %tmp9 )		; <i32> [#uses=0]
+	%tmp41 = tail call fastcc i32 @CountTree( ptr %tmp9 )		; <i32> [#uses=0]
 	br label %tailrecurse
 
 return:		; preds = %tailrecurse
 	ret i32 0
 }
 
-	%struct.SString = type { i8*, i32, i32 }
+	%struct.SString = type { ptr, i32, i32 }
 
 declare void @abort()
 
-define fastcc void @t1(%struct.SString* %word, i8 signext  %c) {
+define fastcc void @t1(ptr %word, i8 signext  %c) {
 entry:
 ; CHECK-LABEL: t1:
 ; CHECK: it ne
 ; CHECK: bxne lr
-	%tmp1 = icmp eq %struct.SString* %word, null		; <i1> [#uses=1]
+	%tmp1 = icmp eq ptr %word, null		; <i1> [#uses=1]
 	br i1 %tmp1, label %cond_true, label %cond_false
 
 cond_true:		; preds = %entry

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-ifcvt3.ll b/llvm/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
index 2960324470d56..40799521e67ad 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
@@ -5,27 +5,27 @@
 ; There shouldn't be a unconditional branch at end of bb52.
 ; rdar://7184787
 
-@posed = external global i64                      ; <i64*> [#uses=1]
+@posed = external global i64                      ; <ptr> [#uses=1]
 
-define i1 @ab_bb52(i64 %.reload78, i64* %.out, i64* %.out1) nounwind {
+define i1 @ab_bb52(i64 %.reload78, ptr %.out, ptr %.out1) nounwind {
 newFuncRoot:
   br label %bb52
 
 bb52.bb55_crit_edge.exitStub:                     ; preds = %bb52
-  store i64 %0, i64* %.out
-  store i64 %2, i64* %.out1
+  store i64 %0, ptr %.out
+  store i64 %2, ptr %.out1
   ret i1 true
 
 bb52.bb53_crit_edge.exitStub:                     ; preds = %bb52
-  store i64 %0, i64* %.out
-  store i64 %2, i64* %.out1
+  store i64 %0, ptr %.out
+  store i64 %2, ptr %.out1
   ret i1 false
 
 bb52:                                             ; preds = %newFuncRoot
 ; CHECK: movne
 ; CHECK: moveq
 ; CHECK: pop
-  %0 = load i64, i64* @posed, align 4                  ; <i64> [#uses=3]
+  %0 = load i64, ptr @posed, align 4                  ; <i64> [#uses=3]
   %1 = sub i64 %0, %.reload78                     ; <i64> [#uses=1]
   %2 = ashr i64 %1, 1                             ; <i64> [#uses=3]
   %3 = icmp eq i64 %2, 0                          ; <i1> [#uses=1]

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-ldm.ll b/llvm/test/CodeGen/Thumb2/thumb2-ldm.ll
index a33efd204ba8e..547f927b34c05 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-ldm.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-ldm.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-ios -mattr=+thumb2 | FileCheck %s -check-prefix=ALL -check-prefix=CHECK
 ; RUN: llc < %s -mtriple=thumbv7-apple-ios -mattr=+thumb2 -arm-assume-misaligned-load-store | FileCheck %s -check-prefix=ALL -check-prefix=CONSERVATIVE
 
-@X = external global [0 x i32]          ; <[0 x i32]*> [#uses=5]
+@X = external global [0 x i32]          ; <ptr> [#uses=5]
 
 define i32 @t1() "frame-pointer"="all" {
 ; ALL-LABEL: t1:
@@ -10,8 +10,8 @@ define i32 @t1() "frame-pointer"="all" {
 ; CONSERVATIVE-NOT: ldrd
 ; CONSERVATIVE-NOT: ldm
 ; ALL: pop {r7, pc}
-        %tmp = load i32, i32* getelementptr ([0 x i32], [0 x i32]* @X, i32 0, i32 0)            ; <i32> [#uses=1]
-        %tmp3 = load i32, i32* getelementptr ([0 x i32], [0 x i32]* @X, i32 0, i32 1)           ; <i32> [#uses=1]
+        %tmp = load i32, ptr @X            ; <i32> [#uses=1]
+        %tmp3 = load i32, ptr getelementptr ([0 x i32], ptr @X, i32 0, i32 1)           ; <i32> [#uses=1]
         %tmp4 = call i32 @f1( i32 %tmp, i32 %tmp3 )                ; <i32> [#uses=1]
         ret i32 %tmp4
 }
@@ -23,9 +23,9 @@ define i32 @t2() "frame-pointer"="all" {
 ; CONSERVATIVE-NOT: ldrd
 ; CONSERVATIVE-NOT: ldm
 ; ALL: pop {r7, pc}
-        %tmp = load i32, i32* getelementptr ([0 x i32], [0 x i32]* @X, i32 0, i32 2)            ; <i32> [#uses=1]
-        %tmp3 = load i32, i32* getelementptr ([0 x i32], [0 x i32]* @X, i32 0, i32 3)           ; <i32> [#uses=1]
-        %tmp5 = load i32, i32* getelementptr ([0 x i32], [0 x i32]* @X, i32 0, i32 4)           ; <i32> [#uses=1]
+        %tmp = load i32, ptr getelementptr ([0 x i32], ptr @X, i32 0, i32 2)            ; <i32> [#uses=1]
+        %tmp3 = load i32, ptr getelementptr ([0 x i32], ptr @X, i32 0, i32 3)           ; <i32> [#uses=1]
+        %tmp5 = load i32, ptr getelementptr ([0 x i32], ptr @X, i32 0, i32 4)           ; <i32> [#uses=1]
         %tmp6 = call i32 @f2( i32 %tmp, i32 %tmp3, i32 %tmp5 )             ; <i32> [#uses=1]
         ret i32 %tmp6
 }
@@ -37,14 +37,14 @@ define i32 @t3() "frame-pointer"="all" {
 ; CONSERVATIVE-NOT: ldrd
 ; CONSERVATIVE-NOT: ldm
 ; ALL: pop {r7, pc}
-        %tmp = load i32, i32* getelementptr ([0 x i32], [0 x i32]* @X, i32 0, i32 1)            ; <i32> [#uses=1]
-        %tmp3 = load i32, i32* getelementptr ([0 x i32], [0 x i32]* @X, i32 0, i32 2)           ; <i32> [#uses=1]
-        %tmp5 = load i32, i32* getelementptr ([0 x i32], [0 x i32]* @X, i32 0, i32 3)           ; <i32> [#uses=1]
+        %tmp = load i32, ptr getelementptr ([0 x i32], ptr @X, i32 0, i32 1)            ; <i32> [#uses=1]
+        %tmp3 = load i32, ptr getelementptr ([0 x i32], ptr @X, i32 0, i32 2)           ; <i32> [#uses=1]
+        %tmp5 = load i32, ptr getelementptr ([0 x i32], ptr @X, i32 0, i32 3)           ; <i32> [#uses=1]
         %tmp6 = call i32 @f2( i32 %tmp, i32 %tmp3, i32 %tmp5 )             ; <i32> [#uses=1]
         ret i32 %tmp6
 }
 
-@g = common global i32* null
+@g = common global ptr null
 
 define void @t4(i32 %a0, i32 %a1, i32 %a2) "frame-pointer"="all" {
 ; ALL-LABEL: t4:
@@ -53,19 +53,18 @@ define void @t4(i32 %a0, i32 %a1, i32 %a2) "frame-pointer"="all" {
 ; ALL: ldm.w sp, {r0, r1, r2}
 ; ALL: bl _f2
   %arr = alloca [4 x i32], align 4
-  %p0 = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 0
-  %p1 = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 1
-  %p2 = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 2
-  store i32* %p0, i32** @g, align 8
+  %p1 = getelementptr inbounds [4 x i32], ptr %arr, i64 0, i64 1
+  %p2 = getelementptr inbounds [4 x i32], ptr %arr, i64 0, i64 2
+  store ptr %arr, ptr @g, align 8
 
-  store i32 %a0, i32* %p0, align 4
-  store i32 %a1, i32* %p1, align 4
-  store i32 %a2, i32* %p2, align 4
+  store i32 %a0, ptr %arr, align 4
+  store i32 %a1, ptr %p1, align 4
+  store i32 %a2, ptr %p2, align 4
   call void @ext()
 
-  %v0 = load i32, i32* %p0, align 4
-  %v1 = load i32, i32* %p1, align 4
-  %v2 = load i32, i32* %p2, align 4
+  %v0 = load i32, ptr %arr, align 4
+  %v1 = load i32, ptr %p1, align 4
+  %v2 = load i32, ptr %p2, align 4
   call i32 @f2(i32 %v0, i32 %v1, i32 %v2)
   ret void
 }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-ldr.ll b/llvm/test/CodeGen/Thumb2/thumb2-ldr.ll
index 4b3ce86ef8d17..413c59231c3bd 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-ldr.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-ldr.ll
@@ -1,29 +1,29 @@
 ; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
 
-define i32 @f1(i32* %v) {
+define i32 @f1(ptr %v) {
 entry:
 ; CHECK-LABEL: f1:
 ; CHECK: ldr r0, [r0]
-        %tmp = load i32, i32* %v
+        %tmp = load i32, ptr %v
         ret i32 %tmp
 }
 
-define i32 @f2(i32* %v) {
+define i32 @f2(ptr %v) {
 entry:
 ; CHECK-LABEL: f2:
 ; CHECK: ldr.w r0, [r0, #4092]
-        %tmp2 = getelementptr i32, i32* %v, i32 1023
-        %tmp = load i32, i32* %tmp2
+        %tmp2 = getelementptr i32, ptr %v, i32 1023
+        %tmp = load i32, ptr %tmp2
         ret i32 %tmp
 }
 
-define i32 @f3(i32* %v) {
+define i32 @f3(ptr %v) {
 entry:
 ; CHECK-LABEL: f3:
 ; CHECK: mov.w r1, #4096
 ; CHECK: ldr r0, [r0, r1]
-        %tmp2 = getelementptr i32, i32* %v, i32 1024
-        %tmp = load i32, i32* %tmp2
+        %tmp2 = getelementptr i32, ptr %v, i32 1024
+        %tmp = load i32, ptr %tmp2
         ret i32 %tmp
 }
 
@@ -32,8 +32,8 @@ entry:
 ; CHECK-LABEL: f4:
 ; CHECK: ldr r0, [r0, #-128]
         %tmp1 = sub i32 %base, 128
-        %tmp2 = inttoptr i32 %tmp1 to i32*
-        %tmp3 = load i32, i32* %tmp2
+        %tmp2 = inttoptr i32 %tmp1 to ptr
+        %tmp3 = load i32, ptr %tmp2
         ret i32 %tmp3
 }
 
@@ -42,8 +42,8 @@ entry:
 ; CHECK-LABEL: f5:
 ; CHECK: ldr r0, [r0, r1]
         %tmp1 = add i32 %base, %offset
-        %tmp2 = inttoptr i32 %tmp1 to i32*
-        %tmp3 = load i32, i32* %tmp2
+        %tmp2 = inttoptr i32 %tmp1 to ptr
+        %tmp3 = load i32, ptr %tmp2
         ret i32 %tmp3
 }
 
@@ -53,8 +53,8 @@ entry:
 ; CHECK: ldr.w r0, [r0, r1, lsl #2]
         %tmp1 = shl i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
-        %tmp3 = inttoptr i32 %tmp2 to i32*
-        %tmp4 = load i32, i32* %tmp3
+        %tmp3 = inttoptr i32 %tmp2 to ptr
+        %tmp4 = load i32, ptr %tmp3
         ret i32 %tmp4
 }
 
@@ -66,7 +66,7 @@ entry:
 
         %tmp1 = lshr i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
-        %tmp3 = inttoptr i32 %tmp2 to i32*
-        %tmp4 = load i32, i32* %tmp3
+        %tmp3 = inttoptr i32 %tmp2 to ptr
+        %tmp4 = load i32, ptr %tmp3
         ret i32 %tmp4
 }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-ldr_ext.ll b/llvm/test/CodeGen/Thumb2/thumb2-ldr_ext.ll
index a911775ebc7b0..a13b235e451a7 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-ldr_ext.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-ldr_ext.ll
@@ -1,25 +1,25 @@
 ; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
 
-define i32 @test1(i8* %v.pntr.s0.u1) {
-    %tmp.u = load i8, i8* %v.pntr.s0.u1
+define i32 @test1(ptr %v.pntr.s0.u1) {
+    %tmp.u = load i8, ptr %v.pntr.s0.u1
     %tmp1.s = zext i8 %tmp.u to i32
     ret i32 %tmp1.s
 }
 
-define i32 @test2(i16* %v.pntr.s0.u1) {
-    %tmp.u = load i16, i16* %v.pntr.s0.u1
+define i32 @test2(ptr %v.pntr.s0.u1) {
+    %tmp.u = load i16, ptr %v.pntr.s0.u1
     %tmp1.s = zext i16 %tmp.u to i32
     ret i32 %tmp1.s
 }
 
-define i32 @test3(i8* %v.pntr.s1.u0) {
-    %tmp.s = load i8, i8* %v.pntr.s1.u0
+define i32 @test3(ptr %v.pntr.s1.u0) {
+    %tmp.s = load i8, ptr %v.pntr.s1.u0
     %tmp1.s = sext i8 %tmp.s to i32
     ret i32 %tmp1.s
 }
 
 define i32 @test4() {
-    %tmp.s = load i16, i16* null
+    %tmp.s = load i16, ptr null
     %tmp1.s = sext i16 %tmp.s to i32
     ret i32 %tmp1.s
 }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-ldr_post.ll b/llvm/test/CodeGen/Thumb2/thumb2-ldr_post.ll
index cb7e795a04500..e42b0193c5513 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-ldr_post.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-ldr_post.ll
@@ -2,8 +2,8 @@
 
 define i32 @test(i32 %a, i32 %b, i32 %c) {
         %tmp1 = mul i32 %a, %b          ; <i32> [#uses=2]
-        %tmp2 = inttoptr i32 %tmp1 to i32*              ; <i32*> [#uses=1]
-        %tmp3 = load i32, i32* %tmp2         ; <i32> [#uses=1]
+        %tmp2 = inttoptr i32 %tmp1 to ptr              ; <ptr> [#uses=1]
+        %tmp3 = load i32, ptr %tmp2         ; <i32> [#uses=1]
         %tmp4 = sub i32 %tmp1, 8               ; <i32> [#uses=1]
         %tmp5 = mul i32 %tmp4, %tmp3            ; <i32> [#uses=1]
         ret i32 %tmp5

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-ldr_pre.ll b/llvm/test/CodeGen/Thumb2/thumb2-ldr_pre.ll
index cfce1d0f37b4e..e2ab0d3e3f663 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-ldr_pre.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-ldr_pre.ll
@@ -1,16 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
 
-define i32* @test1(i32* %X, i32* %dest) {
+define ptr @test1(ptr %X, ptr %dest) {
 ; CHECK-LABEL: test1:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    ldr r2, [r0, #16]!
 ; CHECK-NEXT:    str r2, [r1]
 ; CHECK-NEXT:    bx lr
-        %Y = getelementptr i32, i32* %X, i32 4               ; <i32*> [#uses=2]
-        %A = load i32, i32* %Y               ; <i32> [#uses=1]
-        store i32 %A, i32* %dest
-        ret i32* %Y
+        %Y = getelementptr i32, ptr %X, i32 4               ; <ptr> [#uses=2]
+        %A = load i32, ptr %Y               ; <i32> [#uses=1]
+        store i32 %A, ptr %dest
+        ret ptr %Y
 }
 
 
@@ -22,23 +22,23 @@ define i32 @test2(i32 %a, i32 %b) {
 ; CHECK-NEXT:    add r0, r2
 ; CHECK-NEXT:    bx lr
         %tmp1 = sub i32 %a, 64          ; <i32> [#uses=2]
-        %tmp2 = inttoptr i32 %tmp1 to i32*              ; <i32*> [#uses=1]
-        %tmp3 = load i32, i32* %tmp2         ; <i32> [#uses=1]
+        %tmp2 = inttoptr i32 %tmp1 to ptr              ; <ptr> [#uses=1]
+        %tmp3 = load i32, ptr %tmp2         ; <i32> [#uses=1]
         %tmp4 = sub i32 %tmp1, %b               ; <i32> [#uses=1]
         %tmp5 = add i32 %tmp4, %tmp3            ; <i32> [#uses=1]
         ret i32 %tmp5
 }
 
 
-define i8* @test3(i8* %X, i32* %dest) {
+define ptr @test3(ptr %X, ptr %dest) {
 ; CHECK-LABEL: test3:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    ldrsb r2, [r0, #4]!
 ; CHECK-NEXT:    str r2, [r1]
 ; CHECK-NEXT:    bx lr
-        %tmp1 = getelementptr i8, i8* %X, i32 4
-        %tmp2 = load i8, i8* %tmp1
+        %tmp1 = getelementptr i8, ptr %X, i32 4
+        %tmp2 = load i8, ptr %tmp1
         %tmp3 = sext i8 %tmp2 to i32
-        store i32 %tmp3, i32* %dest
-        ret i8* %tmp1
+        store i32 %tmp3, ptr %dest
+        ret ptr %tmp1
 }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-ldrb.ll b/llvm/test/CodeGen/Thumb2/thumb2-ldrb.ll
index cf8fd6dca6df5..d4dca02ad2302 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-ldrb.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-ldrb.ll
@@ -1,19 +1,19 @@
 ; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
 
-define i8 @f1(i8* %v) {
+define i8 @f1(ptr %v) {
 entry:
 ; CHECK-LABEL: f1:
 ; CHECK: ldrb r0, [r0]
-        %tmp = load i8, i8* %v
+        %tmp = load i8, ptr %v
         ret i8 %tmp
 }
 
-define i8 @f2(i8* %v) {
+define i8 @f2(ptr %v) {
 entry:
 ; CHECK-LABEL: f2:
 ; CHECK: ldrb r0, [r0, #-1]
-        %tmp2 = getelementptr i8, i8* %v, i8 1023
-        %tmp = load i8, i8* %tmp2
+        %tmp2 = getelementptr i8, ptr %v, i8 1023
+        %tmp = load i8, ptr %tmp2
         ret i8 %tmp
 }
 
@@ -23,8 +23,8 @@ entry:
 ; CHECK: mov.w r1, #4096
 ; CHECK: ldrb r0, [r0, r1]
         %tmp1 = add i32 %base, 4096
-        %tmp2 = inttoptr i32 %tmp1 to i8*
-        %tmp3 = load i8, i8* %tmp2
+        %tmp2 = inttoptr i32 %tmp1 to ptr
+        %tmp3 = load i8, ptr %tmp2
         ret i8 %tmp3
 }
 
@@ -33,8 +33,8 @@ entry:
 ; CHECK-LABEL: f4:
 ; CHECK: ldrb r0, [r0, #-128]
         %tmp1 = sub i32 %base, 128
-        %tmp2 = inttoptr i32 %tmp1 to i8*
-        %tmp3 = load i8, i8* %tmp2
+        %tmp2 = inttoptr i32 %tmp1 to ptr
+        %tmp3 = load i8, ptr %tmp2
         ret i8 %tmp3
 }
 
@@ -43,8 +43,8 @@ entry:
 ; CHECK-LABEL: f5:
 ; CHECK: ldrb r0, [r0, r1]
         %tmp1 = add i32 %base, %offset
-        %tmp2 = inttoptr i32 %tmp1 to i8*
-        %tmp3 = load i8, i8* %tmp2
+        %tmp2 = inttoptr i32 %tmp1 to ptr
+        %tmp3 = load i8, ptr %tmp2
         ret i8 %tmp3
 }
 
@@ -54,8 +54,8 @@ entry:
 ; CHECK: ldrb.w r0, [r0, r1, lsl #2]
         %tmp1 = shl i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
-        %tmp3 = inttoptr i32 %tmp2 to i8*
-        %tmp4 = load i8, i8* %tmp3
+        %tmp3 = inttoptr i32 %tmp2 to ptr
+        %tmp4 = load i8, ptr %tmp3
         ret i8 %tmp4
 }
 
@@ -66,7 +66,7 @@ entry:
 ; CHECK: ldrb r0, [r0, r1]
         %tmp1 = lshr i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
-        %tmp3 = inttoptr i32 %tmp2 to i8*
-        %tmp4 = load i8, i8* %tmp3
+        %tmp3 = inttoptr i32 %tmp2 to ptr
+        %tmp4 = load i8, ptr %tmp3
         ret i8 %tmp4
 }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-ldrd.ll b/llvm/test/CodeGen/Thumb2/thumb2-ldrd.ll
index c25359b40577e..a2cc17b03eb05 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-ldrd.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-ldrd.ll
@@ -1,13 +1,13 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+thumb2 | FileCheck %s
 
-@b = external global i64*
+@b = external global ptr
 
 define i64 @t(i64 %a) nounwind readonly {
 entry:
 ; CHECK: ldrd
 ; CHECK: umull
-	%0 = load i64*, i64** @b, align 4
-	%1 = load i64, i64* %0, align 4
+	%0 = load ptr, ptr @b, align 4
+	%1 = load i64, ptr %0, align 4
 	%2 = mul i64 %1, %a
 	ret i64 %2
 }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-ldrh.ll b/llvm/test/CodeGen/Thumb2/thumb2-ldrh.ll
index 33dd681bb04b0..7187e4ec76ce9 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-ldrh.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-ldrh.ll
@@ -1,29 +1,29 @@
 ; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
 
-define i16 @f1(i16* %v) {
+define i16 @f1(ptr %v) {
 entry:
 ; CHECK-LABEL: f1:
 ; CHECK: ldrh r0, [r0]
-        %tmp = load i16, i16* %v
+        %tmp = load i16, ptr %v
         ret i16 %tmp
 }
 
-define i16 @f2(i16* %v) {
+define i16 @f2(ptr %v) {
 entry:
 ; CHECK-LABEL: f2:
 ; CHECK: ldrh.w r0, [r0, #2046]
-        %tmp2 = getelementptr i16, i16* %v, i16 1023
-        %tmp = load i16, i16* %tmp2
+        %tmp2 = getelementptr i16, ptr %v, i16 1023
+        %tmp = load i16, ptr %tmp2
         ret i16 %tmp
 }
 
-define i16 @f3(i16* %v) {
+define i16 @f3(ptr %v) {
 entry:
 ; CHECK-LABEL: f3:
 ; CHECK: mov.w r1, #4096
 ; CHECK: ldrh r0, [r0, r1]
-        %tmp2 = getelementptr i16, i16* %v, i16 2048
-        %tmp = load i16, i16* %tmp2
+        %tmp2 = getelementptr i16, ptr %v, i16 2048
+        %tmp = load i16, ptr %tmp2
         ret i16 %tmp
 }
 
@@ -32,8 +32,8 @@ entry:
 ; CHECK-LABEL: f4:
 ; CHECK: ldrh r0, [r0, #-128]
         %tmp1 = sub i32 %base, 128
-        %tmp2 = inttoptr i32 %tmp1 to i16*
-        %tmp3 = load i16, i16* %tmp2
+        %tmp2 = inttoptr i32 %tmp1 to ptr
+        %tmp3 = load i16, ptr %tmp2
         ret i16 %tmp3
 }
 
@@ -42,8 +42,8 @@ entry:
 ; CHECK-LABEL: f5:
 ; CHECK: ldrh r0, [r0, r1]
         %tmp1 = add i32 %base, %offset
-        %tmp2 = inttoptr i32 %tmp1 to i16*
-        %tmp3 = load i16, i16* %tmp2
+        %tmp2 = inttoptr i32 %tmp1 to ptr
+        %tmp3 = load i16, ptr %tmp2
         ret i16 %tmp3
 }
 
@@ -53,8 +53,8 @@ entry:
 ; CHECK: ldrh.w r0, [r0, r1, lsl #2]
         %tmp1 = shl i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
-        %tmp3 = inttoptr i32 %tmp2 to i16*
-        %tmp4 = load i16, i16* %tmp3
+        %tmp3 = inttoptr i32 %tmp2 to ptr
+        %tmp4 = load i16, ptr %tmp3
         ret i16 %tmp4
 }
 
@@ -65,7 +65,7 @@ entry:
 ; CHECK: ldrh r0, [r0, r1]
         %tmp1 = lshr i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
-        %tmp3 = inttoptr i32 %tmp2 to i16*
-        %tmp4 = load i16, i16* %tmp3
+        %tmp3 = inttoptr i32 %tmp2 to ptr
+        %tmp4 = load i16, ptr %tmp3
         ret i16 %tmp4
 }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-mul.ll b/llvm/test/CodeGen/Thumb2/thumb2-mul.ll
index 4815f4b5f751b..0c57c99127e09 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-mul.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-mul.ll
@@ -10,7 +10,7 @@ define i32 @f1(i32 %a, i32 %b, i32 %c) {
 %struct.CMPoint = type { %struct.Point, float, float, [5 x float] }
 %struct.Point = type { float, float }
 
-define %struct.CMPoint* @t1(i32 %i, i32 %j, i32 %n, %struct.CMPoint* %thePoints) nounwind readnone ssp {
+define ptr @t1(i32 %i, i32 %j, i32 %n, ptr %thePoints) nounwind readnone ssp {
 entry:
 ; CHECK-LABEL: t1:
 ; CHECK: mla     r0, r2, r0, r1
@@ -18,9 +18,9 @@ entry:
 ; CHECK: add.w   r0, r3, r0, lsl #2
   %mul = mul i32 %n, %i
   %add = add i32 %mul, %j
-  %0 = ptrtoint %struct.CMPoint* %thePoints to i32
+  %0 = ptrtoint ptr %thePoints to i32
   %mul5 = mul i32 %add, 36
   %add6 = add i32 %mul5, %0
-  %1 = inttoptr i32 %add6 to %struct.CMPoint*
-  ret %struct.CMPoint* %1
+  %1 = inttoptr i32 %add6 to ptr
+  ret ptr %1
 }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-smul.ll b/llvm/test/CodeGen/Thumb2/thumb2-smul.ll
index 53fca567af164..e038b09f7be7e 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-smul.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-smul.ll
@@ -1,12 +1,12 @@
 ; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+dsp %s -o - |  FileCheck %s
 
-@x = weak global i16 0          ; <i16*> [#uses=1]
-@y = weak global i16 0          ; <i16*> [#uses=0]
+@x = weak global i16 0          ; <ptr> [#uses=1]
+@y = weak global i16 0          ; <ptr> [#uses=0]
 
 define i32 @f1(i32 %y) {
 ; CHECK: f1
 ; CHECK: smulbt r0, r1, r0
-        %tmp = load i16, i16* @x             ; <i16> [#uses=1]
+        %tmp = load i16, ptr @x             ; <i16> [#uses=1]
         %tmp1 = add i16 %tmp, 2         ; <i16> [#uses=1]
         %tmp2 = sext i16 %tmp1 to i32           ; <i32> [#uses=1]
         %tmp3 = ashr i32 %y, 16         ; <i32> [#uses=1]

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-spill-q.ll b/llvm/test/CodeGen/Thumb2/thumb2-spill-q.ll
index 995c2bc3fea8b..ab01312b7ecf5 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-spill-q.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-spill-q.ll
@@ -4,12 +4,12 @@
 %bar = type { float, float, float }
 %baz = type { i32, [16 x %bar], [16 x float], [16 x i32], i8 }
 %foo = type { <4 x float> }
-%quux = type { i32 (...)**, %baz*, i32 }
+%quux = type { ptr, ptr, i32 }
 %quuz = type { %quux, i32, %bar, [128 x i8], [16 x %foo], %foo, %foo, %foo }
 
-declare <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8*, i32) nounwind readonly
+declare <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr, i32) nounwind readonly
 
-define void @aaa(%quuz* %this, i8* %block) {
+define void @aaa(ptr %this, ptr %block) {
 ; CHECK-LABEL: aaa:
 ; CHECK: bfc r4, #0, #4
 ; CHECK: vst1.64 {{.*}}[{{.*}}:128]
@@ -17,33 +17,32 @@ define void @aaa(%quuz* %this, i8* %block) {
 entry:
   %aligned_vec = alloca <4 x float>, align 16
   %"alloca point" = bitcast i32 0 to i32
-  %vecptr = bitcast <4 x float>* %aligned_vec to i8*
-  %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* %vecptr, i32 1) nounwind 
-  store float 6.300000e+01, float* undef, align 4
-  %1 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=1]
-  store float 0.000000e+00, float* undef, align 4
-  %2 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=1]
-  %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
-  store float 0.000000e+00, float* undef, align 4
-  %ld4 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
-  store float 0.000000e+00, float* undef, align 4
-  %ld5 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
-  store float 0.000000e+00, float* undef, align 4
-  %ld6 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
-  store float 0.000000e+00, float* undef, align 4
-  %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
-  store float 0.000000e+00, float* undef, align 4
-  %ld8 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
-  store float 0.000000e+00, float* undef, align 4
-  %ld9 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
-  store float 0.000000e+00, float* undef, align 4
-  %ld10 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
-  store float 0.000000e+00, float* undef, align 4
-  %ld11 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
-  store float 0.000000e+00, float* undef, align 4
-  %ld12 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
-  store float 0.000000e+00, float* undef, align 4
-  %val173 = load <4 x float>, <4 x float>* undef               ; <<4 x float>> [#uses=1]
+  %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr %aligned_vec, i32 1) nounwind 
+  store float 6.300000e+01, ptr undef, align 4
+  %1 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr undef, i32 1) nounwind ; <<4 x float>> [#uses=1]
+  store float 0.000000e+00, ptr undef, align 4
+  %2 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr undef, i32 1) nounwind ; <<4 x float>> [#uses=1]
+  %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr undef, i32 1) nounwind
+  store float 0.000000e+00, ptr undef, align 4
+  %ld4 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr undef, i32 1) nounwind
+  store float 0.000000e+00, ptr undef, align 4
+  %ld5 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr undef, i32 1) nounwind
+  store float 0.000000e+00, ptr undef, align 4
+  %ld6 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr undef, i32 1) nounwind
+  store float 0.000000e+00, ptr undef, align 4
+  %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr undef, i32 1) nounwind
+  store float 0.000000e+00, ptr undef, align 4
+  %ld8 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr undef, i32 1) nounwind
+  store float 0.000000e+00, ptr undef, align 4
+  %ld9 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr undef, i32 1) nounwind
+  store float 0.000000e+00, ptr undef, align 4
+  %ld10 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr undef, i32 1) nounwind
+  store float 0.000000e+00, ptr undef, align 4
+  %ld11 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr undef, i32 1) nounwind
+  store float 0.000000e+00, ptr undef, align 4
+  %ld12 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0(ptr undef, i32 1) nounwind
+  store float 0.000000e+00, ptr undef, align 4
+  %val173 = load <4 x float>, ptr undef               ; <<4 x float>> [#uses=1]
   br label %bb4
 
 bb4:                                              ; preds = %bb193, %entry

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-str.ll b/llvm/test/CodeGen/Thumb2/thumb2-str.ll
index 9bda67ae406cb..96fb5bdddd57e 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-str.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-str.ll
@@ -1,34 +1,34 @@
 ; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
 
-define i32 @f1(i32 %a, i32* %v) {
+define i32 @f1(i32 %a, ptr %v) {
 ; CHECK-LABEL: f1:
 ; CHECK: str r0, [r1]
-        store i32 %a, i32* %v
+        store i32 %a, ptr %v
         ret i32 %a
 }
 
-define i32 @f2(i32 %a, i32* %v) {
+define i32 @f2(i32 %a, ptr %v) {
 ; CHECK-LABEL: f2:
 ; CHECK: str.w r0, [r1, #4092]
-        %tmp2 = getelementptr i32, i32* %v, i32 1023
-        store i32 %a, i32* %tmp2
+        %tmp2 = getelementptr i32, ptr %v, i32 1023
+        store i32 %a, ptr %tmp2
         ret i32 %a
 }
 
-define i32 @f2a(i32 %a, i32* %v) {
+define i32 @f2a(i32 %a, ptr %v) {
 ; CHECK-LABEL: f2a:
 ; CHECK: str r0, [r1, #-128]
-        %tmp2 = getelementptr i32, i32* %v, i32 -32
-        store i32 %a, i32* %tmp2
+        %tmp2 = getelementptr i32, ptr %v, i32 -32
+        store i32 %a, ptr %tmp2
         ret i32 %a
 }
 
-define i32 @f3(i32 %a, i32* %v) {
+define i32 @f3(i32 %a, ptr %v) {
 ; CHECK-LABEL: f3:
 ; CHECK: mov.w r2, #4096
 ; CHECK: str r0, [r1, r2]
-        %tmp2 = getelementptr i32, i32* %v, i32 1024
-        store i32 %a, i32* %tmp2
+        %tmp2 = getelementptr i32, ptr %v, i32 1024
+        store i32 %a, ptr %tmp2
         ret i32 %a
 }
 
@@ -37,8 +37,8 @@ entry:
 ; CHECK-LABEL: f4:
 ; CHECK: str r0, [r1, #-128]
         %tmp1 = sub i32 %base, 128
-        %tmp2 = inttoptr i32 %tmp1 to i32*
-        store i32 %a, i32* %tmp2
+        %tmp2 = inttoptr i32 %tmp1 to ptr
+        store i32 %a, ptr %tmp2
         ret i32 %a
 }
 
@@ -47,8 +47,8 @@ entry:
 ; CHECK-LABEL: f5:
 ; CHECK: str r0, [r1, r2]
         %tmp1 = add i32 %base, %offset
-        %tmp2 = inttoptr i32 %tmp1 to i32*
-        store i32 %a, i32* %tmp2
+        %tmp2 = inttoptr i32 %tmp1 to ptr
+        store i32 %a, ptr %tmp2
         ret i32 %a
 }
 
@@ -58,8 +58,8 @@ entry:
 ; CHECK: str.w r0, [r1, r2, lsl #2]
         %tmp1 = shl i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
-        %tmp3 = inttoptr i32 %tmp2 to i32*
-        store i32 %a, i32* %tmp3
+        %tmp3 = inttoptr i32 %tmp2 to ptr
+        store i32 %a, ptr %tmp3
         ret i32 %a
 }
 
@@ -70,7 +70,7 @@ entry:
 ; CHECK: str r0, [r1, r2]
         %tmp1 = lshr i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
-        %tmp3 = inttoptr i32 %tmp2 to i32*
-        store i32 %a, i32* %tmp3
+        %tmp3 = inttoptr i32 %tmp2 to ptr
+        store i32 %a, ptr %tmp3
         ret i32 %a
 }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-str_post.ll b/llvm/test/CodeGen/Thumb2/thumb2-str_post.ll
index 377c814823cbc..cf9f8b093e9e2 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-str_post.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-str_post.ll
@@ -1,22 +1,22 @@
 ; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
 
-define i16 @test1(i32* %X, i16* %A) {
+define i16 @test1(ptr %X, ptr %A) {
 ; CHECK-LABEL: test1:
 ; CHECK: strh {{.*}}[{{.*}}], #-4
-        %Y = load i32, i32* %X               ; <i32> [#uses=1]
+        %Y = load i32, ptr %X               ; <i32> [#uses=1]
         %tmp1 = trunc i32 %Y to i16             ; <i16> [#uses=1]
-        store i16 %tmp1, i16* %A
-        %tmp2 = ptrtoint i16* %A to i16         ; <i16> [#uses=1]
+        store i16 %tmp1, ptr %A
+        %tmp2 = ptrtoint ptr %A to i16         ; <i16> [#uses=1]
         %tmp3 = sub i16 %tmp2, 4                ; <i16> [#uses=1]
         ret i16 %tmp3
 }
 
-define i32 @test2(i32* %X, i32* %A) {
+define i32 @test2(ptr %X, ptr %A) {
 ; CHECK-LABEL: test2:
 ; CHECK: str {{.*}}[{{.*}}],
-        %Y = load i32, i32* %X               ; <i32> [#uses=1]
-        store i32 %Y, i32* %A
-        %tmp1 = ptrtoint i32* %A to i32         ; <i32> [#uses=1]
+        %Y = load i32, ptr %X               ; <i32> [#uses=1]
+        store i32 %Y, ptr %A
+        %tmp1 = ptrtoint ptr %A to i32         ; <i32> [#uses=1]
         %tmp2 = sub i32 %tmp1, 4                ; <i32> [#uses=1]
         ret i32 %tmp2
 }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-str_pre.ll b/llvm/test/CodeGen/Thumb2/thumb2-str_pre.ll
index d69a1024fadb4..4855ca4ecf33b 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-str_pre.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-str_pre.ll
@@ -1,21 +1,21 @@
 ; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
 
-define void @test1(i32* %X, i32* %A, i32** %dest) {
+define void @test1(ptr %X, ptr %A, ptr %dest) {
 ; CHECK: test1
 ; CHECK: str  r1, [r0, #16]!
-        %B = load i32, i32* %A               ; <i32> [#uses=1]
-        %Y = getelementptr i32, i32* %X, i32 4               ; <i32*> [#uses=2]
-        store i32 %B, i32* %Y
-        store i32* %Y, i32** %dest
+        %B = load i32, ptr %A               ; <i32> [#uses=1]
+        %Y = getelementptr i32, ptr %X, i32 4               ; <ptr> [#uses=2]
+        store i32 %B, ptr %Y
+        store ptr %Y, ptr %dest
         ret void
 }
 
-define i16* @test2(i16* %X, i32* %A) {
+define ptr @test2(ptr %X, ptr %A) {
 ; CHECK: test2
 ; CHECK: strh r1, [r0, #8]!
-        %B = load i32, i32* %A               ; <i32> [#uses=1]
-        %Y = getelementptr i16, i16* %X, i32 4               ; <i16*> [#uses=2]
+        %B = load i32, ptr %A               ; <i32> [#uses=1]
+        %Y = getelementptr i16, ptr %X, i32 4               ; <ptr> [#uses=2]
         %tmp = trunc i32 %B to i16              ; <i16> [#uses=1]
-        store i16 %tmp, i16* %Y
-        ret i16* %Y
+        store i16 %tmp, ptr %Y
+        ret ptr %Y
 }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-strb.ll b/llvm/test/CodeGen/Thumb2/thumb2-strb.ll
index 8ee9d2d158ea0..6f55acef690ce 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-strb.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-strb.ll
@@ -1,34 +1,34 @@
 ; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
 
-define i8 @f1(i8 %a, i8* %v) {
+define i8 @f1(i8 %a, ptr %v) {
 ; CHECK-LABEL: f1:
 ; CHECK: strb r0, [r1]
-        store i8 %a, i8* %v
+        store i8 %a, ptr %v
         ret i8 %a
 }
 
-define i8 @f2(i8 %a, i8* %v) {
+define i8 @f2(i8 %a, ptr %v) {
 ; CHECK-LABEL: f2:
 ; CHECK: strb.w r0, [r1, #4092]
-        %tmp2 = getelementptr i8, i8* %v, i32 4092
-        store i8 %a, i8* %tmp2
+        %tmp2 = getelementptr i8, ptr %v, i32 4092
+        store i8 %a, ptr %tmp2
         ret i8 %a
 }
 
-define i8 @f2a(i8 %a, i8* %v) {
+define i8 @f2a(i8 %a, ptr %v) {
 ; CHECK-LABEL: f2a:
 ; CHECK: strb r0, [r1, #-128]
-        %tmp2 = getelementptr i8, i8* %v, i32 -128
-        store i8 %a, i8* %tmp2
+        %tmp2 = getelementptr i8, ptr %v, i32 -128
+        store i8 %a, ptr %tmp2
         ret i8 %a
 }
 
-define i8 @f3(i8 %a, i8* %v) {
+define i8 @f3(i8 %a, ptr %v) {
 ; CHECK-LABEL: f3:
 ; CHECK: mov.w r2, #4096
 ; CHECK: strb r0, [r1, r2]
-        %tmp2 = getelementptr i8, i8* %v, i32 4096
-        store i8 %a, i8* %tmp2
+        %tmp2 = getelementptr i8, ptr %v, i32 4096
+        store i8 %a, ptr %tmp2
         ret i8 %a
 }
 
@@ -37,8 +37,8 @@ entry:
 ; CHECK-LABEL: f4:
 ; CHECK: strb r0, [r1, #-128]
         %tmp1 = sub i32 %base, 128
-        %tmp2 = inttoptr i32 %tmp1 to i8*
-        store i8 %a, i8* %tmp2
+        %tmp2 = inttoptr i32 %tmp1 to ptr
+        store i8 %a, ptr %tmp2
         ret i8 %a
 }
 
@@ -47,8 +47,8 @@ entry:
 ; CHECK-LABEL: f5:
 ; CHECK: strb r0, [r1, r2]
         %tmp1 = add i32 %base, %offset
-        %tmp2 = inttoptr i32 %tmp1 to i8*
-        store i8 %a, i8* %tmp2
+        %tmp2 = inttoptr i32 %tmp1 to ptr
+        store i8 %a, ptr %tmp2
         ret i8 %a
 }
 
@@ -58,8 +58,8 @@ entry:
 ; CHECK: strb.w r0, [r1, r2, lsl #2]
         %tmp1 = shl i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
-        %tmp3 = inttoptr i32 %tmp2 to i8*
-        store i8 %a, i8* %tmp3
+        %tmp3 = inttoptr i32 %tmp2 to ptr
+        store i8 %a, ptr %tmp3
         ret i8 %a
 }
 
@@ -70,7 +70,7 @@ entry:
 ; CHECK: strb r0, [r1, r2]
         %tmp1 = lshr i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
-        %tmp3 = inttoptr i32 %tmp2 to i8*
-        store i8 %a, i8* %tmp3
+        %tmp3 = inttoptr i32 %tmp2 to ptr
+        store i8 %a, ptr %tmp3
         ret i8 %a
 }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-strh.ll b/llvm/test/CodeGen/Thumb2/thumb2-strh.ll
index dfd1c90d96232..b5e4988db90e5 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-strh.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-strh.ll
@@ -1,34 +1,34 @@
 ; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
 
-define i16 @f1(i16 %a, i16* %v) {
+define i16 @f1(i16 %a, ptr %v) {
 ; CHECK-LABEL: f1:
 ; CHECK: strh r0, [r1]
-        store i16 %a, i16* %v
+        store i16 %a, ptr %v
         ret i16 %a
 }
 
-define i16 @f2(i16 %a, i16* %v) {
+define i16 @f2(i16 %a, ptr %v) {
 ; CHECK-LABEL: f2:
 ; CHECK: strh.w r0, [r1, #4092]
-        %tmp2 = getelementptr i16, i16* %v, i32 2046
-        store i16 %a, i16* %tmp2
+        %tmp2 = getelementptr i16, ptr %v, i32 2046
+        store i16 %a, ptr %tmp2
         ret i16 %a
 }
 
-define i16 @f2a(i16 %a, i16* %v) {
+define i16 @f2a(i16 %a, ptr %v) {
 ; CHECK-LABEL: f2a:
 ; CHECK: strh r0, [r1, #-128]
-        %tmp2 = getelementptr i16, i16* %v, i32 -64
-        store i16 %a, i16* %tmp2
+        %tmp2 = getelementptr i16, ptr %v, i32 -64
+        store i16 %a, ptr %tmp2
         ret i16 %a
 }
 
-define i16 @f3(i16 %a, i16* %v) {
+define i16 @f3(i16 %a, ptr %v) {
 ; CHECK-LABEL: f3:
 ; CHECK: mov.w r2, #4096
 ; CHECK: strh r0, [r1, r2]
-        %tmp2 = getelementptr i16, i16* %v, i32 2048
-        store i16 %a, i16* %tmp2
+        %tmp2 = getelementptr i16, ptr %v, i32 2048
+        store i16 %a, ptr %tmp2
         ret i16 %a
 }
 
@@ -37,8 +37,8 @@ entry:
 ; CHECK-LABEL: f4:
 ; CHECK: strh r0, [r1, #-128]
         %tmp1 = sub i32 %base, 128
-        %tmp2 = inttoptr i32 %tmp1 to i16*
-        store i16 %a, i16* %tmp2
+        %tmp2 = inttoptr i32 %tmp1 to ptr
+        store i16 %a, ptr %tmp2
         ret i16 %a
 }
 
@@ -47,8 +47,8 @@ entry:
 ; CHECK-LABEL: f5:
 ; CHECK: strh r0, [r1, r2]
         %tmp1 = add i32 %base, %offset
-        %tmp2 = inttoptr i32 %tmp1 to i16*
-        store i16 %a, i16* %tmp2
+        %tmp2 = inttoptr i32 %tmp1 to ptr
+        store i16 %a, ptr %tmp2
         ret i16 %a
 }
 
@@ -58,8 +58,8 @@ entry:
 ; CHECK: strh.w r0, [r1, r2, lsl #2]
         %tmp1 = shl i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
-        %tmp3 = inttoptr i32 %tmp2 to i16*
-        store i16 %a, i16* %tmp3
+        %tmp3 = inttoptr i32 %tmp2 to ptr
+        store i16 %a, ptr %tmp3
         ret i16 %a
 }
 
@@ -70,7 +70,7 @@ entry:
 ; CHECK: strh r0, [r1, r2]
         %tmp1 = lshr i32 %offset, 2
         %tmp2 = add i32 %base, %tmp1
-        %tmp3 = inttoptr i32 %tmp2 to i16*
-        store i16 %a, i16* %tmp3
+        %tmp3 = inttoptr i32 %tmp2 to ptr
+        store i16 %a, ptr %tmp3
         ret i16 %a
 }

diff  --git a/llvm/test/CodeGen/Thumb2/thumb2-tbh.ll b/llvm/test/CodeGen/Thumb2/thumb2-tbh.ll
index fd8070a8b583a..398c4cc6781f7 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-tbh.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-tbh.ll
@@ -9,22 +9,22 @@
 
 ; Thumb2 target should reorder the bb's in order to use tbb / tbh.
 
-	%struct.R_flstr = type { i32, i32, i8* }
-	%struct._T_tstr = type { i32, %struct.R_flstr*, %struct._T_tstr* }
-@_C_nextcmd = external global i32		; <i32*> [#uses=3]
-@.str31 = external constant [28 x i8], align 1		; <[28 x i8]*> [#uses=1]
-@_T_gtol = external global %struct._T_tstr*		; <%struct._T_tstr**> [#uses=2]
+	%struct.R_flstr = type { i32, i32, ptr }
+	%struct._T_tstr = type { i32, ptr, ptr }
+@_C_nextcmd = external global i32		; <ptr> [#uses=3]
+@.str31 = external constant [28 x i8], align 1		; <ptr> [#uses=1]
+@_T_gtol = external global ptr		; <ptr> [#uses=2]
 
-declare i32 @strlen(i8* nocapture) nounwind readonly
+declare i32 @strlen(ptr nocapture) nounwind readonly
 
-declare void @Z_fatal(i8*) noreturn nounwind
+declare void @Z_fatal(ptr) noreturn nounwind
 
-declare noalias i8* @calloc(i32, i32) nounwind
+declare noalias ptr @calloc(i32, i32) nounwind
 
 ; Jump tables are not anchored next to the TBB/TBH any more. Make sure the
 ; correct address is still calculated (i.e. via a PC-relative symbol *at* the
 ; TBB/TBH).
-define i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
+define i32 @main(i32 %argc, ptr nocapture %argv) nounwind {
 ; CHECK-LABEL: main:
 ; CHECK-NOT: adr {{r[0-9]+}}, LJTI
 ; T1:          lsls r[[x:[0-9]+]], {{r[0-9]+}}, #1
@@ -47,39 +47,39 @@ bb5.i:		; preds = %bb42.i
 	br label %bb40.i
 
 bb7.i:		; preds = %bb42.i
-	call  void @_T_addtol(%struct._T_tstr** @_T_gtol, i32 0, i8* null) nounwind
+	call  void @_T_addtol(ptr @_T_gtol, i32 0, ptr null) nounwind
 	unreachable
 
 bb15.i:		; preds = %bb42.i
-	call  void @_T_addtol(%struct._T_tstr** @_T_gtol, i32 2, i8* null) nounwind
+	call  void @_T_addtol(ptr @_T_gtol, i32 2, ptr null) nounwind
 	unreachable
 
 bb23.i:		; preds = %bb42.i
-	%1 = call  i32 @strlen(i8* null) nounwind readonly		; <i32> [#uses=0]
+	%1 = call  i32 @strlen(ptr null) nounwind readonly		; <i32> [#uses=0]
 	unreachable
 
 bb33.i:		; preds = %bb42.i
-	store i32 0, i32* @_C_nextcmd, align 4
-	%2 = call  noalias i8* @calloc(i32 21, i32 1) nounwind		; <i8*> [#uses=0]
+	store i32 0, ptr @_C_nextcmd, align 4
+	%2 = call  noalias ptr @calloc(i32 21, i32 1) nounwind		; <ptr> [#uses=0]
 	unreachable
 
 bb34.i:		; preds = %bb42.i
-	%3 = load i32, i32* @_C_nextcmd, align 4		; <i32> [#uses=1]
+	%3 = load i32, ptr @_C_nextcmd, align 4		; <i32> [#uses=1]
 	%4 = add i32 %3, 1		; <i32> [#uses=1]
-	store i32 %4, i32* @_C_nextcmd, align 4
-	%5 = call  noalias i8* @calloc(i32 22, i32 1) nounwind		; <i8*> [#uses=0]
+	store i32 %4, ptr @_C_nextcmd, align 4
+	%5 = call  noalias ptr @calloc(i32 22, i32 1) nounwind		; <ptr> [#uses=0]
 	unreachable
 
 bb35.i:		; preds = %bb42.i
-	%6 = call  noalias i8* @calloc(i32 20, i32 1) nounwind		; <i8*> [#uses=0]
+	%6 = call  noalias ptr @calloc(i32 20, i32 1) nounwind		; <ptr> [#uses=0]
 	unreachable
 
 bb37.i:		; preds = %bb42.i
-	%7 = call  noalias i8* @calloc(i32 14, i32 1) nounwind		; <i8*> [#uses=0]
+	%7 = call  noalias ptr @calloc(i32 14, i32 1) nounwind		; <ptr> [#uses=0]
 	unreachable
 
 bb39.i:		; preds = %bb42.i
-	call  void @Z_fatal(i8* getelementptr ([28 x i8], [28 x i8]* @.str31, i32 0, i32 0)) nounwind
+	call  void @Z_fatal(ptr @.str31) nounwind
 	unreachable
 
 bb40.i:		; preds = %bb42.i, %bb5.i, %bb1.i2
@@ -100,4 +100,4 @@ bb42.i:		; preds = %bb40.i, %entry
 	]
 }
 
-declare void @_T_addtol(%struct._T_tstr** nocapture, i32, i8*) nounwind
+declare void @_T_addtol(ptr nocapture, i32, ptr) nounwind

diff  --git a/llvm/test/CodeGen/Thumb2/tls1.ll b/llvm/test/CodeGen/Thumb2/tls1.ll
index 6e46433ec8484..8755f575173b7 100644
--- a/llvm/test/CodeGen/Thumb2/tls1.ll
+++ b/llvm/test/CodeGen/Thumb2/tls1.ll
@@ -6,15 +6,15 @@
 ; RUN:     -relocation-model=pic | grep "__tls_get_addr"
 
 
-@i = dso_local thread_local global i32 15		; <i32*> [#uses=2]
+@i = dso_local thread_local global i32 15		; <ptr> [#uses=2]
 
 define dso_local i32 @f() {
 entry:
-	%tmp1 = load i32, i32* @i		; <i32> [#uses=1]
+	%tmp1 = load i32, ptr @i		; <i32> [#uses=1]
 	ret i32 %tmp1
 }
 
-define dso_local i32* @g() {
+define dso_local ptr @g() {
 entry:
-	ret i32* @i
+	ret ptr @i
 }

diff  --git a/llvm/test/CodeGen/Thumb2/tls2.ll b/llvm/test/CodeGen/Thumb2/tls2.ll
index 98ae8e6d90d9b..2bd0f67f6cbdd 100644
--- a/llvm/test/CodeGen/Thumb2/tls2.ll
+++ b/llvm/test/CodeGen/Thumb2/tls2.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi | FileCheck %s -check-prefix=CHECK-NOT-PIC
 ; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi -relocation-model=pic | FileCheck %s -check-prefix=CHECK-PIC
 
-@i = external thread_local global i32		; <i32*> [#uses=2]
+@i = external thread_local global i32		; <ptr> [#uses=2]
 
 define i32 @f() {
 entry:
@@ -12,11 +12,11 @@ entry:
 
 ; CHECK-PIC-LABEL: f:
 ; CHECK-PIC: bl __tls_get_addr
-	%tmp1 = load i32, i32* @i		; <i32> [#uses=1]
+	%tmp1 = load i32, ptr @i		; <i32> [#uses=1]
 	ret i32 %tmp1
 }
 
-define i32* @g() {
+define ptr @g() {
 entry:
 ; CHECK-NOT-PIC-LABEL: g:
 ; CHECK-NOT-PIC: add r0, pc
@@ -25,5 +25,5 @@ entry:
 
 ; CHECK-PIC-LABEL: g:
 ; CHECK-PIC: bl __tls_get_addr
-	ret i32* @i
+	ret ptr @i
 }

diff  --git a/llvm/test/CodeGen/Thumb2/tpsoft.ll b/llvm/test/CodeGen/Thumb2/tpsoft.ll
index 7337ca9345bb0..2454bd0a98580 100644
--- a/llvm/test/CodeGen/Thumb2/tpsoft.ll
+++ b/llvm/test/CodeGen/Thumb2/tpsoft.ll
@@ -49,14 +49,14 @@ define arm_aapcs_vfpcc i32 @main() nounwind {
 ; ELFASM-NEXT:  .Ltmp0:
 ; ELFASM-NEXT:    .long i(GOTTPOFF)-((.LPC0_0+4)-.Ltmp0)
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   switch i32 %0, label %bb2 [
     i32 12, label %bb
     i32 13, label %bb1
   ]
 
 bb:                                               ; preds = %entry
-  %1 = tail call arm_aapcs_vfpcc  i32 @foo(i8* @a) nounwind
+  %1 = tail call arm_aapcs_vfpcc  i32 @foo(ptr @a) nounwind
   ret i32 %1
 
 
@@ -74,13 +74,13 @@ bb:                                               ; preds = %entry
 
 
 bb1:                                              ; preds = %entry
-  %2 = tail call arm_aapcs_vfpcc  i32 @bar(i32* bitcast ([10 x i8]* @b to i32*)) nounwind
+  %2 = tail call arm_aapcs_vfpcc  i32 @bar(ptr @b) nounwind
   ret i32 %2
 
 bb2:                                              ; preds = %entry
   ret i32 -1
 }
 
-declare arm_aapcs_vfpcc i32 @foo(i8*)
+declare arm_aapcs_vfpcc i32 @foo(ptr)
 
-declare arm_aapcs_vfpcc i32 @bar(i32*)
+declare arm_aapcs_vfpcc i32 @bar(ptr)

diff  --git a/llvm/test/CodeGen/Thumb2/unreachable-large-offset-gep.ll b/llvm/test/CodeGen/Thumb2/unreachable-large-offset-gep.ll
index 641787b0d7f24..00710c3ed3681 100644
--- a/llvm/test/CodeGen/Thumb2/unreachable-large-offset-gep.ll
+++ b/llvm/test/CodeGen/Thumb2/unreachable-large-offset-gep.ll
@@ -5,7 +5,7 @@
 
 target triple = "thumbv8m-unknown-linux-android"
 
-define void @d(i32* %c) {
+define void @d(ptr %c) {
 entry:
   br i1 false, label %f.exit, label %i.d
 
@@ -13,10 +13,10 @@ i.d:
   br label %i.d
 
 f.exit:
-  %0 = getelementptr i32, i32* %c, i32 57
+  %0 = getelementptr i32, ptr %c, i32 57
   br label %if.g
 
 if.g:
-  store i32 0, i32* %0
+  store i32 0, ptr %0
   ret void
 }

diff  --git a/llvm/test/CodeGen/Thumb2/v8_IT_1.ll b/llvm/test/CodeGen/Thumb2/v8_IT_1.ll
index 948f159c343db..1f9e006ab8ed1 100644
--- a/llvm/test/CodeGen/Thumb2/v8_IT_1.ll
+++ b/llvm/test/CodeGen/Thumb2/v8_IT_1.ll
@@ -4,14 +4,14 @@
 ;CHECK-LABEL: select_s_v_v:
 ;CHECK-NOT: it
 ;CHECK: bx
-define <16 x i8> @select_s_v_v(i32 %avail, i8* %bar) {
+define <16 x i8> @select_s_v_v(i32 %avail, ptr %bar) {
 entry:
-  %vld1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8* %bar, i32 1)
+  %vld1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0(ptr %bar, i32 1)
   %and = and i32 %avail, 1
   %tobool = icmp eq i32 %and, 0
   %vld1. = select i1 %tobool, <16 x i8> %vld1, <16 x i8> zeroinitializer
   ret <16 x i8> %vld1.
 }
 
-declare <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8* , i32 )
+declare <16 x i8> @llvm.arm.neon.vld1.v16i8.p0(ptr , i32 )
 

diff  --git a/llvm/test/CodeGen/Thumb2/v8_IT_2.ll b/llvm/test/CodeGen/Thumb2/v8_IT_2.ll
index 9a3f263c52590..125641cbec6fb 100644
--- a/llvm/test/CodeGen/Thumb2/v8_IT_2.ll
+++ b/llvm/test/CodeGen/Thumb2/v8_IT_2.ll
@@ -1,9 +1,9 @@
 ; RUN: llc < %s -mtriple=thumbv8 | FileCheck %s
 ; RUN: llc < %s -mtriple=thumbv7 -arm-restrict-it | FileCheck %s
 
-	%struct.quad_struct = type { i32, i32, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct* }
+	%struct.quad_struct = type { i32, i32, ptr, ptr, ptr, ptr, ptr }
 
-define fastcc i32 @CountTree(%struct.quad_struct* %tree) {
+define fastcc i32 @CountTree(ptr %tree) {
 entry:
 ; CHECK-LABEL: CountTree:
 ; CHECK: bne
@@ -16,20 +16,20 @@ entry:
 	br label %tailrecurse
 
 tailrecurse:		; preds = %bb, %entry
-	%tmp6 = load %struct.quad_struct*, %struct.quad_struct** null		; <%struct.quad_struct*> [#uses=1]
-	%tmp9 = load %struct.quad_struct*, %struct.quad_struct** null		; <%struct.quad_struct*> [#uses=2]
-	%tmp12 = load %struct.quad_struct*, %struct.quad_struct** null		; <%struct.quad_struct*> [#uses=1]
-	%tmp14 = icmp eq %struct.quad_struct* null, null		; <i1> [#uses=1]
-	%tmp17 = icmp eq %struct.quad_struct* %tmp6, null		; <i1> [#uses=1]
-	%tmp23 = icmp eq %struct.quad_struct* %tmp9, null		; <i1> [#uses=1]
-	%tmp29 = icmp eq %struct.quad_struct* %tmp12, null		; <i1> [#uses=1]
+	%tmp6 = load ptr, ptr null		; <ptr> [#uses=1]
+	%tmp9 = load ptr, ptr null		; <ptr> [#uses=2]
+	%tmp12 = load ptr, ptr null		; <ptr> [#uses=1]
+	%tmp14 = icmp eq ptr null, null		; <i1> [#uses=1]
+	%tmp17 = icmp eq ptr %tmp6, null		; <i1> [#uses=1]
+	%tmp23 = icmp eq ptr %tmp9, null		; <i1> [#uses=1]
+	%tmp29 = icmp eq ptr %tmp12, null		; <i1> [#uses=1]
 	%bothcond = and i1 %tmp17, %tmp14		; <i1> [#uses=1]
 	%bothcond1 = and i1 %bothcond, %tmp23		; <i1> [#uses=1]
 	%bothcond2 = and i1 %bothcond1, %tmp29		; <i1> [#uses=1]
 	br i1 %bothcond2, label %return, label %bb
 
 bb:		; preds = %tailrecurse
-	%tmp41 = tail call fastcc i32 @CountTree( %struct.quad_struct* %tmp9 )		; <i32> [#uses=0]
+	%tmp41 = tail call fastcc i32 @CountTree( ptr %tmp9 )		; <i32> [#uses=0]
 	br label %tailrecurse
 
 return:		; preds = %tailrecurse

diff  --git a/llvm/test/CodeGen/Thumb2/v8_IT_3.ll b/llvm/test/CodeGen/Thumb2/v8_IT_3.ll
index 9b616736b2fad..d9d355272bd07 100644
--- a/llvm/test/CodeGen/Thumb2/v8_IT_3.ll
+++ b/llvm/test/CodeGen/Thumb2/v8_IT_3.ll
@@ -3,10 +3,10 @@
 ; RUN: llc < %s -mtriple=thumbv8 -arm-atomic-cfg-tidy=0 -arm-restrict-it -relocation-model=pic | FileCheck %s --check-prefix=CHECK-PIC
 ; RUN: llc < %s -mtriple=thumbv7 -arm-atomic-cfg-tidy=0 -arm-restrict-it -relocation-model=pic | FileCheck %s --check-prefix=CHECK-PIC
 
-%struct.FF = type { i32 (i32*)*, i32 (i32*, i32*, i32, i32, i32, i32)*, i32 (i32, i32, i8*)*, void ()*, i32 (i32, i8*, i32*)*, i32 ()* }
-%struct.BD = type { %struct.BD*, i32, i32, i32, i32, i64, i32 (%struct.BD*, i8*, i64, i32)*, i32 (%struct.BD*, i8*, i32, i32)*, i32 (%struct.BD*, i8*, i64, i32)*, i32 (%struct.BD*, i8*, i32, i32)*, i32 (%struct.BD*, i64, i32)*, [16 x i8], i64, i64 }
+%struct.FF = type { ptr, ptr, ptr, ptr, ptr, ptr }
+%struct.BD = type { ptr, i32, i32, i32, i32, i64, ptr, ptr, ptr, ptr, ptr, [16 x i8], i64, i64 }
 
-@FuncPtr = external hidden unnamed_addr global %struct.FF*
+@FuncPtr = external hidden unnamed_addr global ptr
 @.str1 = external hidden unnamed_addr constant [6 x i8], align 4
 @G = external unnamed_addr global i32
 @.str2 = external hidden unnamed_addr constant [58 x i8], align 4
@@ -20,8 +20,8 @@ entry:
   %block_size = alloca i32, align 4
   %block_count = alloca i32, align 4
   %index_cache = alloca i32, align 4
-  store i32 0, i32* %index_cache, align 4
-  %tmp = load i32, i32* @G, align 4
+  store i32 0, ptr %index_cache, align 4
+  %tmp = load i32, ptr @G, align 4
   %tmp1 = call i32 @bar(i32 0, i32 0, i32 %tmp) nounwind
   switch i32 %tmp1, label %bb8 [
     i32 1, label %bb
@@ -30,7 +30,7 @@ entry:
   ]
 
 bb:
-  %tmp2 = load i32, i32* @G, align 4
+  %tmp2 = load i32, ptr @G, align 4
   %tmp4 = icmp eq i32 %tmp2, 1
   br i1 %tmp4, label %bb1, label %bb8
 
@@ -41,14 +41,14 @@ bb1:
 ; CHECK-NEXT: it       eq
 ; CHECK-NEXT: cmpeq
 ; CHECK: %bb1
-  %tmp5 = load i32, i32* %block_size, align 4
-  %tmp6 = load i32, i32* %block_count, align 4
-  %tmp7 = call %struct.FF* @Get() nounwind
-  store %struct.FF* %tmp7, %struct.FF** @FuncPtr, align 4
+  %tmp5 = load i32, ptr %block_size, align 4
+  %tmp6 = load i32, ptr %block_count, align 4
+  %tmp7 = call ptr @Get() nounwind
+  store ptr %tmp7, ptr @FuncPtr, align 4
   %tmp10 = zext i32 %tmp6 to i64
   %tmp11 = zext i32 %tmp5 to i64
   %tmp12 = mul nsw i64 %tmp10, %tmp11
-  %tmp13 = call i32 @foo(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str1, i32 0, i32 0), i64 %tmp12, i32 %tmp5) nounwind
+  %tmp13 = call i32 @foo(ptr @.str1, i64 %tmp12, i32 %tmp5) nounwind
   br label %bb8
 
 bb4:
@@ -71,10 +71,10 @@ bb8:
   ret i32 -1
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
 
-declare %struct.FF* @Get()
+declare ptr @Get()
 
-declare i32 @foo(i8*, i64, i32)
+declare i32 @foo(ptr, i64, i32)
 
 declare i32 @bar(i32, i32, i32)

diff  --git a/llvm/test/CodeGen/Thumb2/v8_IT_4.ll b/llvm/test/CodeGen/Thumb2/v8_IT_4.ll
index d521e741708f2..fb6c86ab9fda9 100644
--- a/llvm/test/CodeGen/Thumb2/v8_IT_4.ll
+++ b/llvm/test/CodeGen/Thumb2/v8_IT_4.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=thumbv8-eabi -float-abi=hard -regalloc=basic -arm-restrict-it | FileCheck --check-prefixes=P23 %s
 ; RUN: llc < %s -mtriple=thumbv7-eabi -float-abi=hard -regalloc=basic -arm-restrict-it | FileCheck --check-prefixes=P23 %s
 
-%"struct.__gnu_cxx::__normal_iterator<char*,std::basic_string<char, std::char_traits<char>, std::allocator<char> > >" = type { i8* }
+%"struct.__gnu_cxx::__normal_iterator<char*,std::basic_string<char, std::char_traits<char>, std::allocator<char> > >" = type { ptr }
 %"struct.__gnu_cxx::new_allocator<char>" = type <{ i8 }>
 %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >" = type { %"struct.__gnu_cxx::__normal_iterator<char*,std::basic_string<char, std::char_traits<char>, std::allocator<char> > >" }
 %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >::_Rep" = type { %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >::_Rep_base" }
 %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >::_Rep_base" = type { i32, i32, i32 }
 
 
-define weak arm_aapcs_vfpcc i32 @_ZNKSs7compareERKSs(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this, %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) {
+define weak arm_aapcs_vfpcc i32 @_ZNKSs7compareERKSs(ptr %this, ptr %__str) {
 ; P01-LABEL: _ZNKSs7compareERKSs:
 ; P01:       @ %bb.0: @ %entry
 ; P01-NEXT:    .save {r4, r5, r6, r7, r8, lr}
@@ -72,13 +72,13 @@ define weak arm_aapcs_vfpcc i32 @_ZNKSs7compareERKSs(%"struct.std::basic_string<
 ; P23-NEXT:    moveq r0, r1
 ; P23-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 entry:
-  %0 = tail call arm_aapcs_vfpcc  i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this) ; <i32> [#uses=3]
-  %1 = tail call arm_aapcs_vfpcc  i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) ; <i32> [#uses=3]
+  %0 = tail call arm_aapcs_vfpcc  i32 @_ZNKSs4sizeEv(ptr %this) ; <i32> [#uses=3]
+  %1 = tail call arm_aapcs_vfpcc  i32 @_ZNKSs4sizeEv(ptr %__str) ; <i32> [#uses=3]
   %2 = icmp ult i32 %1, %0                        ; <i1> [#uses=1]
   %3 = select i1 %2, i32 %1, i32 %0               ; <i32> [#uses=1]
-  %4 = tail call arm_aapcs_vfpcc  i8* @_ZNKSs7_M_dataEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this) ; <i8*> [#uses=1]
-  %5 = tail call arm_aapcs_vfpcc  i8* @_ZNKSs4dataEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) ; <i8*> [#uses=1]
-  %6 = tail call arm_aapcs_vfpcc  i32 @memcmp(i8* %4, i8* %5, i32 %3) nounwind readonly ; <i32> [#uses=2]
+  %4 = tail call arm_aapcs_vfpcc  ptr @_ZNKSs7_M_dataEv(ptr %this) ; <ptr> [#uses=1]
+  %5 = tail call arm_aapcs_vfpcc  ptr @_ZNKSs4dataEv(ptr %__str) ; <ptr> [#uses=1]
+  %6 = tail call arm_aapcs_vfpcc  i32 @memcmp(ptr %4, ptr %5, i32 %3) nounwind readonly ; <i32> [#uses=2]
   %7 = icmp eq i32 %6, 0                          ; <i1> [#uses=1]
   br i1 %7, label %bb, label %bb1
 
@@ -90,10 +90,10 @@ bb1:                                              ; preds = %entry
   ret i32 %6
 }
 
-declare arm_aapcs_vfpcc i32 @memcmp(i8* nocapture, i8* nocapture, i32) nounwind readonly
+declare arm_aapcs_vfpcc i32 @memcmp(ptr nocapture, ptr nocapture, i32) nounwind readonly
 
-declare arm_aapcs_vfpcc i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this)
+declare arm_aapcs_vfpcc i32 @_ZNKSs4sizeEv(ptr %this)
 
-declare arm_aapcs_vfpcc i8* @_ZNKSs7_M_dataEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this)
+declare arm_aapcs_vfpcc ptr @_ZNKSs7_M_dataEv(ptr %this)
 
-declare arm_aapcs_vfpcc i8* @_ZNKSs4dataEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this)
+declare arm_aapcs_vfpcc ptr @_ZNKSs4dataEv(ptr %this)

diff  --git a/llvm/test/CodeGen/Thumb2/v8_IT_5.ll b/llvm/test/CodeGen/Thumb2/v8_IT_5.ll
index 52892c948329c..6ecfbf4f844e2 100644
--- a/llvm/test/CodeGen/Thumb2/v8_IT_5.ll
+++ b/llvm/test/CodeGen/Thumb2/v8_IT_5.ll
@@ -30,11 +30,11 @@ if.then115:
   br i1 undef, label %if.else163, label %if.else145
 
 if.else145:
-  %call150 = call fastcc %struct.hc* @foo(%struct.hc* undef, i32 34865152) optsize
+  %call150 = call fastcc ptr @foo(ptr undef, i32 34865152) optsize
   br label %while.body172
 
 if.else163:
-  %call168 = call fastcc %struct.hc* @foo(%struct.hc* undef, i32 34078720) optsize
+  %call168 = call fastcc ptr @foo(ptr undef, i32 34078720) optsize
   br label %while.body172
 
 while.body172:
@@ -44,4 +44,4 @@ if.else173:
   ret i32 -1
 }
 
-declare hidden fastcc %struct.hc* @foo(%struct.hc* nocapture, i32) nounwind optsize
+declare hidden fastcc ptr @foo(ptr nocapture, i32) nounwind optsize


        

